author     Linux Build Service Account <lnxbuild@localhost>   2017-03-15 10:25:27 -0700
committer  Gerrit - the friendly Code Review server <code-review@localhost>   2017-03-15 10:25:27 -0700
commit     1e3cff4cba08ae1f9cbe1669f9ad2c56ae850e0f (patch)
tree       7b0751f1cf719c6f93c224093a1f5c5e16886c86 /drivers/platform
parent     ad64f3159b1a40af3df4045ce96a3b5b48214364 (diff)
parent     32baae206b4007a07c1d4107c72fb4b58673a33c (diff)
Merge "mhi: mhi_rmnet: pass maximum payload size to mhi during registration"
Diffstat (limited to 'drivers/platform')
-rw-r--r--  drivers/platform/msm/mhi/mhi.h            |  71
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c        |  82
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.h        |   4
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c      |  93
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c      | 421
-rw-r--r--  drivers/platform/msm/mhi/mhi_init.c       | 205
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c        | 129
-rw-r--r--  drivers/platform/msm/mhi/mhi_macros.h     |   9
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c       | 728
-rw-r--r--  drivers/platform/msm/mhi/mhi_mmio_ops.c   | 139
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c         | 134
-rw-r--r--  drivers/platform/msm/mhi/mhi_ring_ops.c   |  33
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c        | 114
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c     | 124
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c        |  88
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.h        |   9
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c    |  71
17 files changed, 1272 insertions(+), 1182 deletions(-)
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 3d40d114437a..9d3862707595 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -29,7 +29,6 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
-extern struct mhi_pcie_devices mhi_devices;
struct mhi_device_ctxt;
enum MHI_DEBUG_LEVEL {
@@ -49,11 +48,13 @@ struct pcie_core_info {
u32 mhi_ver;
void __iomem *bar0_base;
void __iomem *bar0_end;
- void __iomem *bar2_base;
- void __iomem *bar2_end;
u32 irq_base;
u32 max_nr_msis;
+ u32 domain;
+ u32 bus;
+ u32 slot;
struct pci_saved_state *pcie_state;
+ bool pci_master;
};
struct bhi_ctxt_t {
@@ -64,7 +65,6 @@ struct bhi_ctxt_t {
void *unaligned_image_loc;
dev_t bhi_dev;
struct cdev cdev;
- struct class *bhi_class;
struct device *dev;
};
@@ -382,7 +382,7 @@ struct mhi_chan_cfg {
union mhi_cmd_pkt cmd_pkt;
};
-struct mhi_client_handle {
+struct mhi_client_config {
struct mhi_chan_info chan_info;
struct mhi_device_ctxt *mhi_dev_ctxt;
struct mhi_client_info_t client_info;
@@ -434,6 +434,8 @@ struct mhi_counters {
atomic_t events_pending;
u32 *msi_counter;
u32 mhi_reset_cntr;
+ u32 link_down_cntr;
+ u32 link_up_cntr;
};
struct mhi_flags {
@@ -486,13 +488,17 @@ struct mhi_dev_space {
};
struct mhi_device_ctxt {
+ struct list_head node;
+ struct pcie_core_info core;
+ struct msm_pcie_register_event mhi_pci_link_event;
+ struct pci_dev *pcie_device;
+ struct bhi_ctxt_t bhi_ctxt;
+ struct platform_device *plat_dev;
enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
enum MHI_STATE mhi_state; /* protocol state */
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
- struct mhi_pcie_dev_info *dev_info;
- struct pcie_core_info *dev_props;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
@@ -500,7 +506,6 @@ struct mhi_device_ctxt {
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
-
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
struct task_struct *event_thread_handle;
@@ -517,7 +522,9 @@ struct mhi_device_ctxt {
struct hrtimer m1_timer;
ktime_t m1_timeout;
+ u32 poll_reset_timeout_ms;
+ struct notifier_block mhi_ssr_nb;
struct esoc_desc *esoc_handle;
void *esoc_ssr_handle;
@@ -534,30 +541,30 @@ struct mhi_device_ctxt {
struct wakeup_source w_lock;
char *chan_info;
- struct dentry *mhi_parent_folder;
-};
-
-struct mhi_pcie_dev_info {
- struct pcie_core_info core;
- struct mhi_device_ctxt mhi_ctxt;
- struct msm_pcie_register_event mhi_pci_link_event;
- struct pci_dev *pcie_device;
- struct pci_driver *mhi_pcie_driver;
- struct bhi_ctxt_t bhi_ctxt;
- struct platform_device *plat_dev;
- u32 link_down_cntr;
- u32 link_up_cntr;
+ struct dentry *child;
+ struct dentry *parent;
+ void *mhi_ipc_log;
+
+ /* Shadow functions since not all device supports runtime pm */
+ void (*runtime_get)(struct mhi_device_ctxt *mhi_dev_ctxt);
+ void (*runtime_put)(struct mhi_device_ctxt *mhi_dev_ctxt);
+ void (*assert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set);
+ void (*deassert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt);
};
-struct mhi_pcie_devices {
- struct mhi_pcie_dev_info device_list[MHI_MAX_SUPPORTED_DEVICES];
- s32 nr_of_devices;
+struct mhi_device_driver {
+ struct mutex lock;
+ struct list_head head;
+ struct class *mhi_bhi_class;
+ struct dentry *parent;
};
struct mhi_event_ring_cfg {
u32 nr_desc;
u32 msi_vec;
u32 intmod;
+ enum MHI_CLIENT_CHANNEL chan;
u32 flags;
enum MHI_RING_CLASS class;
enum MHI_EVENT_RING_STATE state;
@@ -570,18 +577,20 @@ struct mhi_data_buf {
u32 bounce_flag;
};
+extern struct mhi_device_driver *mhi_device_drv;
+
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id);
int mhi_reset_all_thread_queues(
struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_add_elements_to_event_rings(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
-int get_nr_avail_ring_elements(struct mhi_ring *ring);
+int get_nr_avail_ring_elements(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *ring);
int get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1,
void *loc_2, u32 *nr_el);
int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
- struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_init_device_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 nr_ev_el, u32 event_ring_index);
int mhi_send_cmd(struct mhi_device_ctxt *dest_device,
@@ -632,11 +641,11 @@ int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt);
enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer);
int mhi_pci_suspend(struct device *dev);
int mhi_pci_resume(struct device *dev);
-int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int mhi_init_pcie_device(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_init_pm_sysfs(struct device *dev);
void mhi_rem_pm_sysfs(struct device *dev);
void mhi_pci_remove(struct pci_dev *mhi_device);
-int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int mhi_ctxt_init(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_chan_max_buffers(u32 chan);
int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_link_state_cb(struct msm_pcie_notify *notify);
@@ -644,6 +653,8 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
+void mhi_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_set);
@@ -691,7 +702,7 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void process_m1_transition(struct work_struct *work);
-int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state);
const char *state_transition_str(enum STATE_TRANSITION state);
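
Editor's note: the header rework above replaces direct calls such as mhi_assert_device_wake() and mhi_runtime_get() with per-device function pointers ("shadow functions"), since not every device supports runtime PM. Below is a minimal sketch of that indirection using simplified stand-in types, not the driver's real mhi_device_ctxt; the no-op fallback is an assumption about how a non-runtime-PM device would be wired up.

#include <linux/types.h>

/* Sketch: callers invoke per-device function pointers unconditionally,
 * and probe-time assignment decides what actually happens. */
struct dev_ctxt {
	void (*runtime_get)(struct dev_ctxt *ctxt);
	void (*runtime_put)(struct dev_ctxt *ctxt);
};

static void real_runtime_get(struct dev_ctxt *ctxt)
{
	/* would call pm_runtime_get() on the underlying device */
}

static void real_runtime_put(struct dev_ctxt *ctxt)
{
	/* would call pm_runtime_put() on the underlying device */
}

static void noop_runtime_get(struct dev_ctxt *ctxt) { }
static void noop_runtime_put(struct dev_ctxt *ctxt) { }

static void setup_pm_ops(struct dev_ctxt *ctxt, bool has_runtime_pm)
{
	if (has_runtime_pm) {
		ctxt->runtime_get = real_runtime_get;
		ctxt->runtime_put = real_runtime_put;
	} else {
		ctxt->runtime_get = noop_runtime_get;
		ctxt->runtime_put = noop_runtime_put;
	}
}

Call sites then stay identical regardless of device capability: ctxt->runtime_get(ctxt).
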
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 113791a62c38..447d52bc7a46 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -23,7 +23,12 @@
static int bhi_open(struct inode *mhi_inode, struct file *file_handle)
{
- file_handle->private_data = &mhi_devices.device_list[0];
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+
+ mhi_dev_ctxt = container_of(mhi_inode->i_cdev,
+ struct mhi_device_ctxt,
+ bhi_ctxt.cdev);
+ file_handle->private_data = mhi_dev_ctxt;
return 0;
}
@@ -34,10 +39,9 @@ static ssize_t bhi_write(struct file *file,
int ret_val = 0;
u32 pcie_word_val = 0;
u32 i = 0;
- struct bhi_ctxt_t *bhi_ctxt =
- &(((struct mhi_pcie_dev_info *)file->private_data)->bhi_ctxt);
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &((struct mhi_pcie_dev_info *)file->private_data)->mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = file->private_data;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+
size_t amount_copied = 0;
uintptr_t align_len = 0x1000;
u32 tx_db_val = 0;
@@ -58,22 +62,19 @@ static ssize_t bhi_write(struct file *file,
if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
return -EIO;
- mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered. User Image size 0x%zx\n", count);
bhi_ctxt->unaligned_image_loc = kmalloc(count + (align_len - 1),
GFP_KERNEL);
if (bhi_ctxt->unaligned_image_loc == NULL)
return -ENOMEM;
- mhi_log(MHI_MSG_INFO, "Unaligned Img Loc: %p\n",
- bhi_ctxt->unaligned_image_loc);
bhi_ctxt->image_loc =
(void *)((uintptr_t)bhi_ctxt->unaligned_image_loc +
(align_len - (((uintptr_t)bhi_ctxt->unaligned_image_loc) %
align_len)));
- mhi_log(MHI_MSG_INFO, "Aligned Img Loc: %p\n", bhi_ctxt->image_loc);
-
bhi_ctxt->image_size = count;
if (0 != copy_from_user(bhi_ctxt->image_loc, buf, count)) {
@@ -83,10 +84,9 @@ static ssize_t bhi_write(struct file *file,
amount_copied = count;
/* Flush the writes, in anticipation for a device read */
wmb();
- mhi_log(MHI_MSG_INFO,
- "Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
+
bhi_ctxt->phy_image_loc = dma_map_single(
- &mhi_dev_ctxt->dev_info->plat_dev->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
bhi_ctxt->image_loc,
bhi_ctxt->image_size,
DMA_TO_DEVICE);
@@ -95,9 +95,8 @@ static ssize_t bhi_write(struct file *file,
ret_val = -EIO;
goto bhi_copy_error;
}
- mhi_log(MHI_MSG_INFO,
- "Mapped image to DMA addr 0x%lx:\n",
- (uintptr_t)bhi_ctxt->phy_image_loc);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Mapped image to DMA addr 0x%llx:\n", bhi_ctxt->phy_image_loc);
bhi_ctxt->image_size = count;
@@ -149,18 +148,18 @@ static ssize_t bhi_write(struct file *file,
BHI_STATUS_MASK,
BHI_STATUS_SHIFT);
read_unlock_bh(pm_xfer_lock);
- mhi_log(MHI_MSG_CRITICAL,
- "BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
tx_db_val, err, errdbg1, errdbg2, errdbg3);
if (BHI_STATUS_SUCCESS != tx_db_val)
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Incorrect BHI status: %d retry: %d\n",
tx_db_val, i);
else
break;
usleep_range(20000, 25000);
}
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bhi_ctxt->phy_image_loc,
bhi_ctxt->image_size, DMA_TO_DEVICE);
@@ -169,8 +168,8 @@ static ssize_t bhi_write(struct file *file,
ret_val = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_RESET);
if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to start state change event\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to start state change event\n");
}
return amount_copied;
@@ -184,48 +183,43 @@ static const struct file_operations bhi_fops = {
.open = bhi_open,
};
-int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
+int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct bhi_ctxt_t *bhi_ctxt = &mhi_pcie_device->bhi_ctxt;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ const struct pcie_core_info *core = &mhi_dev_ctxt->core;
int ret_val = 0;
int r;
+ char node_name[32];
- if (NULL == mhi_pcie_device || 0 == mhi_pcie_device->core.bar0_base
- || 0 == mhi_pcie_device->core.bar0_end)
+ if (bhi_ctxt->bhi_base == NULL)
return -EIO;
ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
if (IS_ERR_VALUE(ret_val)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to alloc char device %d\n",
- ret_val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to alloc char device %d\n", ret_val);
return -EIO;
}
- bhi_ctxt->bhi_class = class_create(THIS_MODULE, "bhi");
- if (IS_ERR(bhi_ctxt->bhi_class)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to instantiate class %d\n",
- ret_val);
- r = PTR_RET(bhi_ctxt->bhi_class);
- goto err_class_create;
- }
cdev_init(&bhi_ctxt->cdev, &bhi_fops);
bhi_ctxt->cdev.owner = THIS_MODULE;
ret_val = cdev_add(&bhi_ctxt->cdev, bhi_ctxt->bhi_dev, 1);
- bhi_ctxt->dev = device_create(bhi_ctxt->bhi_class, NULL,
- bhi_ctxt->bhi_dev, NULL,
- "bhi");
+ snprintf(node_name, sizeof(node_name),
+ "bhi_%04X_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+ bhi_ctxt->dev = device_create(mhi_device_drv->mhi_bhi_class,
+ NULL,
+ bhi_ctxt->bhi_dev,
+ NULL,
+ node_name);
if (IS_ERR(bhi_ctxt->dev)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to add bhi cdev\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to add bhi cdev\n");
r = PTR_RET(bhi_ctxt->dev);
goto err_dev_create;
}
return 0;
err_dev_create:
cdev_del(&bhi_ctxt->cdev);
- class_destroy(bhi_ctxt->bhi_class);
-err_class_create:
unregister_chrdev_region(MAJOR(bhi_ctxt->bhi_dev), 1);
return r;
}
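
Editor's note: the new bhi_open() above recovers its owning device context from the inode instead of indexing a fixed global array (mhi_devices.device_list[0]), which is what makes multiple MHI devices possible. It works because the cdev is embedded in the per-device context, so container_of() walks back from inode->i_cdev. A sketch of the same pattern with a simplified stand-in context type:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* 'my_ctxt' stands in for the driver's real per-device context;
 * 'cdev' must be the same embedded member registered via cdev_add(). */
struct my_ctxt {
	struct cdev cdev;
	/* ... per-device state ... */
};

static int my_open(struct inode *inode, struct file *filp)
{
	struct my_ctxt *ctxt =
		container_of(inode->i_cdev, struct my_ctxt, cdev);

	filp->private_data = ctxt;	/* later fops read it back */
	return 0;
}
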
diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h
index ca44f69cea42..943226408ce3 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.h
+++ b/drivers/platform/msm/mhi/mhi_bhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -55,6 +55,6 @@
#define BHI_POLL_SLEEP_TIME 1000
#define BHI_POLL_NR_RETRIES 10
-int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device);
+int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index fe163f3895a5..3103ade8c8d4 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -26,41 +26,55 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
int r, i;
char dt_prop[MAX_BUF_SIZE];
const struct device_node *np =
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node;
+ mhi_dev_ctxt->plat_dev->dev.of_node;
r = of_property_read_u32(np, "mhi-event-rings",
&mhi_dev_ctxt->mmio_info.nr_event_rings);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to pull event ring info from DT, %d\n", r);
- goto dt_error;
+ return -EINVAL;
}
mhi_dev_ctxt->ev_ring_props =
kzalloc(sizeof(struct mhi_event_ring_cfg) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
- if (!mhi_dev_ctxt->ev_ring_props) {
- r = -ENOMEM;
- goto dt_error;
- }
+ if (!mhi_dev_ctxt->ev_ring_props)
+ return -ENOMEM;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+ u32 dt_configs[5];
+ int len;
+
scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-event-cfg-", i);
- r = of_property_read_u32_array(np, dt_prop,
- (u32 *)&mhi_dev_ctxt->ev_ring_props[i],
- 4);
+ if (!of_find_property(np, dt_prop, &len))
+ goto dt_error;
+ if (len != sizeof(dt_configs))
+ goto dt_error;
+ r = of_property_read_u32_array(
+ np,
+ dt_prop,
+ dt_configs,
+ sizeof(dt_configs) / sizeof(u32));
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to pull ev ring %d info from DT %d\n",
i, r);
goto dt_error;
}
- mhi_log(MHI_MSG_INFO,
- "Pulled ev ring %d,desc:0x%x,msi_vec:0x%x,intmod%d flags0x%x\n",
- i, mhi_dev_ctxt->ev_ring_props[i].nr_desc,
- mhi_dev_ctxt->ev_ring_props[i].msi_vec,
- mhi_dev_ctxt->ev_ring_props[i].intmod,
- mhi_dev_ctxt->ev_ring_props[i].flags);
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc = dt_configs[0];
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec = dt_configs[1];
+ mhi_dev_ctxt->ev_ring_props[i].intmod = dt_configs[2];
+ mhi_dev_ctxt->ev_ring_props[i].chan = dt_configs[3];
+ mhi_dev_ctxt->ev_ring_props[i].flags = dt_configs[4];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "ev ring %d,desc:0x%x,msi:0x%x,intmod%d chan:%u flags0x%x\n",
+ i,
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+ mhi_dev_ctxt->ev_ring_props[i].intmod,
+ mhi_dev_ctxt->ev_ring_props[i].chan,
+ mhi_dev_ctxt->ev_ring_props[i].flags);
if (GET_EV_PROPS(EV_MANAGED,
mhi_dev_ctxt->ev_ring_props[i].flags))
mhi_dev_ctxt->ev_ring_props[i].mhi_handler_ptr =
@@ -76,14 +90,18 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->ev_ring_props[i].class = MHI_SW_RING;
mhi_dev_ctxt->mmio_info.nr_sw_event_rings++;
}
- mhi_log(MHI_MSG_INFO,
- "Detected %d SW EV rings and %d HW EV rings out of %d EV rings\n",
- mhi_dev_ctxt->mmio_info.nr_sw_event_rings,
- mhi_dev_ctxt->mmio_info.nr_hw_event_rings,
- mhi_dev_ctxt->mmio_info.nr_event_rings);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Detected %d SW EV rings and %d HW EV rings out of %d EV rings\n",
+ mhi_dev_ctxt->mmio_info.nr_sw_event_rings,
+ mhi_dev_ctxt->mmio_info.nr_hw_event_rings,
+ mhi_dev_ctxt->mmio_info.nr_event_rings);
}
+
+ return 0;
dt_error:
- return r;
+ kfree(mhi_dev_ctxt->ev_ring_props);
+ mhi_dev_ctxt->ev_ring_props = NULL;
+ return -EINVAL;
}
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -195,7 +213,7 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
int ret_val = 0;
u32 i;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
if (GET_EV_PROPS(EV_TYPE,
mhi_dev_ctxt->ev_ring_props[i].flags) == type &&
@@ -207,9 +225,10 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
return ret_val;
}
ring_ev_db(mhi_dev_ctxt, i);
- mhi_log(MHI_MSG_INFO, "Finished ev ring init %d\n", i);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Finished ev ring init %d\n", i);
}
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -228,7 +247,7 @@ int mhi_add_elements_to_event_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
MHI_ER_DATA_TYPE);
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unrecognized event stage, %d\n", new_state);
ret_val = -EINVAL;
break;
@@ -247,23 +266,18 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
spinlock_t *lock = &event_ctxt->ring_lock;
- if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
- mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
- return -EINVAL;
- }
-
spin_lock_irqsave(lock, flags);
- mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
- mhi_dev_ctxt->mmio_info.mmio_addr,
- mhi_dev_ctxt->mmio_info.mmio_len);
- mhi_log(MHI_MSG_INFO, "Initializing event ring %d with %d desc\n",
- ring_index, nr_ev_el);
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_INFO,
+ "Initializing event ring %d with %d desc\n",
+ ring_index,
+ nr_ev_el);
for (i = 0; i < nr_ev_el - 1; ++i) {
ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
if (0 != ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to insert el in ev ctxt\n");
break;
}
@@ -279,7 +293,8 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_event_ctxt *ev_ctxt;
struct mhi_ring *local_ev_ctxt;
- mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Resetting event index %d\n", index);
ev_ctxt =
&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
local_ev_ctxt =
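
Editor's note: mhi_populate_event_cfg() above no longer casts DT words straight into struct mhi_event_ring_cfg; it reads into a bounded scratch array and first verifies the property's byte length, so a device tree still carrying the old 4-word layout fails cleanly rather than being misparsed as the new 5-word one. A sketch of that validate-then-read pattern; the property name "vendor,example-cfg" is made up for illustration:

#include <linux/of.h>
#include <linux/errno.h>

static int read_cfg_words(const struct device_node *np, u32 cfg[5])
{
	int len;

	/* Verify the property exists and is exactly the expected size
	 * before filling the bounded buffer. */
	if (!of_find_property(np, "vendor,example-cfg", &len))
		return -EINVAL;		/* property missing */
	if (len != 5 * sizeof(u32))
		return -EINVAL;		/* old/new DT layout mismatch */

	return of_property_read_u32_array(np, "vendor,example-cfg",
					  cfg, 5);
}
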
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index 395e19c91f35..606388e78dbd 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -32,12 +32,11 @@
#include "mhi_hwio.h"
#include "mhi_bhi.h"
-struct mhi_pcie_devices mhi_devices;
+struct mhi_device_driver *mhi_device_drv;
static int mhi_pci_probe(struct pci_dev *pcie_device,
const struct pci_device_id *mhi_device_id);
static int __exit mhi_plat_remove(struct platform_device *pdev);
-void *mhi_ipc_log;
static DEFINE_PCI_DEVICE_TABLE(mhi_pcie_device_id) = {
{ MHI_PCIE_VENDOR_ID, MHI_PCIE_DEVICE_ID_9x35,
@@ -59,129 +58,73 @@ static const struct of_device_id mhi_plat_match[] = {
static void mhi_msm_fixup(struct pci_dev *pcie_device)
{
if (pcie_device->class == PCI_CLASS_NOT_DEFINED) {
- mhi_log(MHI_MSG_INFO, "Setting msm pcie class\n");
pcie_device->class = PCI_CLASS_STORAGE_SCSI;
}
}
-int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int mhi_ctxt_init(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
- u32 i = 0, j = 0;
- u32 requested_msi_number = 32, actual_msi_number = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- struct pci_dev *pcie_device = NULL;
+ u32 j = 0;
- if (NULL == mhi_pcie_dev)
- return -EINVAL;
- pcie_device = mhi_pcie_dev->pcie_device;
-
- ret_val = mhi_init_pcie_device(mhi_pcie_dev);
- if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initialize pcie device, ret %d\n",
- ret_val);
- return -ENODEV;
- }
- ret_val = mhi_init_device_ctxt(mhi_pcie_dev, &mhi_pcie_dev->mhi_ctxt);
+ ret_val = mhi_init_device_ctxt(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initialize main MHI ctxt ret %d\n",
- ret_val);
- goto msi_config_err;
- }
- ret_val = mhi_esoc_register(&mhi_pcie_dev->mhi_ctxt);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register with esoc ret %d.\n",
- ret_val);
- }
-
- device_disable_async_suspend(&pcie_device->dev);
- ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
- if (IS_ERR_VALUE(ret_val)) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to enable MSIs for pcie dev ret_val %d.\n",
- ret_val);
- goto msi_config_err;
- } else if (ret_val) {
- mhi_log(MHI_MSG_INFO,
- "Hrmmm, got fewer MSIs than we requested. Requested %d, got %d.\n",
- requested_msi_number, ret_val);
- actual_msi_number = ret_val;
- } else {
- mhi_log(MHI_MSG_VERBOSE,
- "Got all requested MSIs, moving on\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to initialize main MHI ctxt ret %d\n", ret_val);
+ return ret_val;
}
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
for (j = 0; j < mhi_dev_ctxt->mmio_info.nr_event_rings; j++) {
- mhi_log(MHI_MSG_VERBOSE,
- "MSI_number = %d, event ring number = %d\n",
- mhi_dev_ctxt->ev_ring_props[j].msi_vec, j);
-
- ret_val = request_irq(pcie_device->irq +
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "MSI_number = %d, event ring number = %d\n",
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec, j);
+
+ /* outside of requested irq boundary */
+ if (mhi_dev_ctxt->core.max_nr_msis <=
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "max msi supported:%d request:%d ev:%d\n",
+ mhi_dev_ctxt->core.max_nr_msis,
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec,
+ j);
+ goto irq_error;
+ }
+ ret_val = request_irq(mhi_dev_ctxt->core.irq_base +
mhi_dev_ctxt->ev_ring_props[j].msi_vec,
mhi_dev_ctxt->ev_ring_props[j].mhi_handler_ptr,
IRQF_NO_SUSPEND,
"mhi_drv",
- (void *)&pcie_device->dev);
+ (void *)mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register handler for MSI ret_val = %d\n",
- ret_val);
- goto msi_config_err;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to register handler for MSI ret_val = %d\n",
+ ret_val);
+ goto irq_error;
}
}
- mhi_pcie_dev->core.irq_base = pcie_device->irq;
- mhi_log(MHI_MSG_VERBOSE,
- "Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base);
- mhi_pcie_dev->core.max_nr_msis = requested_msi_number;
- ret_val = mhi_init_pm_sysfs(&pcie_device->dev);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs ret %d\n",
- ret_val);
- goto sysfs_config_err;
- }
- if (!mhi_init_debugfs(&mhi_pcie_dev->mhi_ctxt))
- mhi_log(MHI_MSG_ERROR, "Failed to init debugfs.\n");
-
- mhi_pcie_dev->mhi_ctxt.mmio_info.mmio_addr =
- mhi_pcie_dev->core.bar0_base;
- pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->mhi_ctxt.dev_info->plat_dev->dev.platform_data =
- &mhi_pcie_dev->mhi_ctxt;
- ret_val = mhi_reg_notifiers(&mhi_pcie_dev->mhi_ctxt);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to register for notifiers\n");
- goto mhi_state_transition_error;
- }
- mhi_log(MHI_MSG_INFO,
- "Finished all driver probing returning ret_val %d.\n",
- ret_val);
- return ret_val;
-mhi_state_transition_error:
+ mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->core.bar0_base;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "exit\n");
+ return 0;
+
+irq_error:
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
kfree(mhi_dev_ctxt->ev_ring_props);
- mhi_rem_pm_sysfs(&pcie_device->dev);
-sysfs_config_err:
- for (; i >= 0; --i)
- free_irq(pcie_device->irq + i, &pcie_device->dev);
- debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder);
-msi_config_err:
- pci_disable_device(pcie_device);
- return ret_val;
+ for (j = j - 1; j >= 0; --j)
+ free_irq(mhi_dev_ctxt->core.irq_base + j, NULL);
+
+ return -EINVAL;
}
static const struct dev_pm_ops pm_ops = {
@@ -204,22 +147,47 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
const struct pci_device_id *mhi_device_id)
{
int ret_val = 0;
- struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
struct platform_device *plat_dev;
- struct mhi_device_ctxt *mhi_dev_ctxt;
- u32 nr_dev = mhi_devices.nr_of_devices;
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
+ u32 domain = pci_domain_nr(pcie_device->bus);
+ u32 bus = pcie_device->bus->number;
+ u32 dev_id = pcie_device->device;
+ u32 slot = PCI_SLOT(pcie_device->devfn);
+ unsigned long msi_requested, msi_required;
+ struct msm_pcie_register_event *mhi_pci_link_event;
+
+ /* Find correct device context based on bdf & dev_id */
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct pcie_core_info *core = &itr->core;
+
+ if (core->domain == domain &&
+ core->bus == bus &&
+ core->dev_id == dev_id &&
+ core->slot == slot) {
+ mhi_dev_ctxt = itr;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
+ if (!mhi_dev_ctxt)
+ return -EPROBE_DEFER;
- mhi_log(MHI_MSG_INFO, "Entering\n");
- mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
- if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
- mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
- return -ENOMEM;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing Domain:%02u Bus:%04u dev:0x%04x slot:%04u\n",
+ domain, bus, dev_id, slot);
+
+ ret_val = of_property_read_u32(mhi_dev_ctxt->plat_dev->dev.of_node,
+ "mhi-event-rings",
+ (u32 *)&msi_required);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to pull ev ring info from DT, %d\n", ret_val);
+ return ret_val;
}
- mhi_devices.nr_of_devices++;
- plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
+ plat_dev = mhi_dev_ctxt->plat_dev;
pcie_device->dev.of_node = plat_dev->dev.of_node;
- mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
mutex_init(&mhi_dev_ctxt->pm_lock);
@@ -230,47 +198,105 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
(unsigned long)mhi_dev_ctxt);
mhi_dev_ctxt->flags.link_up = 1;
- ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
- mhi_pcie_dev->pcie_device = pcie_device;
- mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
- mhi_pcie_dev->mhi_pci_link_event.events =
- (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
- mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
- mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
- mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
- ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
+
+ /* Setup bus scale */
+ mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(plat_dev);
+ if (!mhi_dev_ctxt->bus_scale_table)
+ return -ENODATA;
+ mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+ (mhi_dev_ctxt->bus_scale_table);
+ if (!mhi_dev_ctxt->bus_client)
+ return -EINVAL;
+ mhi_set_bus_request(mhi_dev_ctxt, 1);
+
+ mhi_dev_ctxt->pcie_device = pcie_device;
+
+ mhi_pci_link_event = &mhi_dev_ctxt->mhi_pci_link_event;
+ mhi_pci_link_event->events =
+ (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
+ mhi_pci_link_event->user = pcie_device;
+ mhi_pci_link_event->callback = mhi_link_state_cb;
+ mhi_pci_link_event->notify.data = mhi_dev_ctxt;
+ ret_val = msm_pcie_register_event(mhi_pci_link_event);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to reg for link notifications %d\n", ret_val);
+ return ret_val;
+ }
+
+ dev_set_drvdata(&pcie_device->dev, mhi_dev_ctxt);
+
+ mhi_dev_ctxt->core.pci_master = true;
+ ret_val = mhi_init_pcie_device(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register for link notifications %d.\n",
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_CRITICAL,
+ "Failed to initialize pcie device, ret %d\n",
ret_val);
return ret_val;
}
+ pci_set_master(pcie_device);
+ device_disable_async_suspend(&pcie_device->dev);
- /* Initialize MHI CNTXT */
- ret_val = mhi_ctxt_init(mhi_pcie_dev);
+ ret_val = mhi_esoc_register(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "MHI Initialization failed, ret %d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to reg with esoc ret %d\n", ret_val);
+ }
+
+ /* # of MSI requested must be power of 2 */
+ msi_requested = 1 << find_last_bit(&msi_required, 32);
+ if (msi_requested < msi_required)
+ msi_requested <<= 1;
+
+ ret_val = pci_enable_msi_range(pcie_device, 1, msi_requested);
+ if (IS_ERR_VALUE(ret_val) || (ret_val < msi_requested)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to enable MSIs for pcie dev ret_val %d.\n",
ret_val);
+ return -EIO;
+ }
+
+ mhi_dev_ctxt->core.max_nr_msis = msi_requested;
+ mhi_dev_ctxt->core.irq_base = pcie_device->irq;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Setting IRQ Base to 0x%x\n", mhi_dev_ctxt->core.irq_base);
+
+ /* Initialize MHI CNTXT */
+ ret_val = mhi_ctxt_init(mhi_dev_ctxt);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n", ret_val);
goto deregister_pcie;
}
- pci_set_master(mhi_pcie_dev->pcie_device);
+
+ mhi_init_pm_sysfs(&pcie_device->dev);
+ mhi_init_debugfs(mhi_dev_ctxt);
+ mhi_reg_notifiers(mhi_dev_ctxt);
+
+ /* setup shadow pm functions */
+ mhi_dev_ctxt->assert_wake = mhi_assert_device_wake;
+ mhi_dev_ctxt->deassert_wake = mhi_deassert_device_wake;
+ mhi_dev_ctxt->runtime_get = mhi_runtime_get;
+ mhi_dev_ctxt->runtime_put = mhi_runtime_put;
mutex_lock(&mhi_dev_ctxt->pm_lock);
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
- ret_val = set_mhi_base_state(mhi_pcie_dev);
+ ret_val = set_mhi_base_state(mhi_dev_ctxt);
+
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_ERROR,
"Error Setting MHI Base State %d\n", ret_val);
goto unlock_pm_lock;
}
if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
- ret_val = bhi_probe(mhi_pcie_dev);
+ ret_val = bhi_probe(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error with bhi_probe ret:%d", ret_val);
goto unlock_pm_lock;
}
@@ -312,33 +338,99 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
unlock_pm_lock:
mutex_unlock(&mhi_dev_ctxt->pm_lock);
deregister_pcie:
- msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
+ msm_pcie_deregister_event(&mhi_dev_ctxt->mhi_pci_link_event);
return ret_val;
}
static int mhi_plat_probe(struct platform_device *pdev)
{
- u32 nr_dev = mhi_devices.nr_of_devices;
+ int r = 0, len;
struct mhi_device_ctxt *mhi_dev_ctxt;
- int r = 0;
+ struct pcie_core_info *core;
+ char node[32];
+ struct device_node *of_node = pdev->dev.of_node;
+ u64 address_window[2];
- mhi_log(MHI_MSG_INFO, "Entered\n");
- mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+ if (of_node == NULL)
+ return -ENODEV;
- mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
- if (!mhi_dev_ctxt->bus_scale_table)
- return -ENODATA;
- mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
- (mhi_dev_ctxt->bus_scale_table);
- if (!mhi_dev_ctxt->bus_client)
- return -EINVAL;
+ pdev->id = of_alias_get_id(of_node, "mhi");
+ if (pdev->id < 0)
+ return -ENODEV;
- mhi_devices.device_list[nr_dev].plat_dev = pdev;
- r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
+ mhi_dev_ctxt = devm_kzalloc(&pdev->dev,
+ sizeof(*mhi_dev_ctxt),
+ GFP_KERNEL);
+ if (!mhi_dev_ctxt)
+ return -ENOMEM;
+
+ if (!of_find_property(of_node, "qcom,mhi-address-window", &len))
+ return -ENODEV;
+
+ if (len != sizeof(address_window))
+ return -ENODEV;
+
+ r = of_property_read_u64_array(of_node,
+ "qcom,mhi-address-window",
+ address_window,
+ sizeof(address_window) / sizeof(u64));
if (r)
- mhi_log(MHI_MSG_CRITICAL,
+ return r;
+
+ core = &mhi_dev_ctxt->core;
+ r = of_property_read_u32(of_node, "qcom,pci-dev_id", &core->dev_id);
+ if (r)
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-slot", &core->slot);
+ if (r)
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-domain", &core->domain);
+ if (r)
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-bus", &core->bus);
+ if (r)
+ return r;
+
+ snprintf(node, sizeof(node),
+ "mhi_%04x_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+ mhi_dev_ctxt->mhi_ipc_log =
+ ipc_log_context_create(MHI_IPC_LOG_PAGES, node, 0);
+ if (!mhi_dev_ctxt->mhi_ipc_log)
+ pr_err("%s: Error creating ipc_log buffer\n", __func__);
+
+ r = of_property_read_u32(of_node, "qcom,mhi-ready-timeout",
+ &mhi_dev_ctxt->poll_reset_timeout_ms);
+ if (r)
+ mhi_dev_ctxt->poll_reset_timeout_ms =
+ MHI_READY_STATUS_TIMEOUT_MS;
+
+ mhi_dev_ctxt->dev_space.start_win_addr = address_window[0];
+ mhi_dev_ctxt->dev_space.end_win_addr = address_window[1];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Start Addr:0x%llx End_Addr:0x%llx\n",
+ mhi_dev_ctxt->dev_space.start_win_addr,
+ mhi_dev_ctxt->dev_space.end_win_addr);
+
+ mhi_dev_ctxt->plat_dev = pdev;
+ platform_set_drvdata(pdev, mhi_dev_ctxt);
+
+ r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to set mask for DMA ret %d\n", r);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ return r;
+ }
+
+ mhi_dev_ctxt->parent = mhi_device_drv->parent;
+ mutex_lock(&mhi_device_drv->lock);
+ list_add_tail(&mhi_dev_ctxt->node, &mhi_device_drv->head);
+ mutex_unlock(&mhi_device_drv->lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+
return 0;
}
@@ -354,42 +446,59 @@ static struct platform_driver mhi_plat_driver = {
static void __exit mhi_exit(void)
{
- ipc_log_context_destroy(mhi_ipc_log);
pci_unregister_driver(&mhi_pcie_driver);
platform_driver_unregister(&mhi_plat_driver);
}
static int __exit mhi_plat_remove(struct platform_device *pdev)
{
- platform_driver_unregister(&mhi_plat_driver);
+ struct mhi_device_ctxt *mhi_dev_ctxt = platform_get_drvdata(pdev);
+
+ ipc_log_context_destroy(mhi_dev_ctxt->mhi_ipc_log);
return 0;
}
static int __init mhi_init(void)
{
int r;
+ struct mhi_device_driver *mhi_dev_drv;
+
+ mhi_dev_drv = kmalloc(sizeof(*mhi_dev_drv), GFP_KERNEL);
+ if (mhi_dev_drv == NULL)
+ return -ENOMEM;
+
+ mutex_init(&mhi_dev_drv->lock);
+ mutex_lock(&mhi_dev_drv->lock);
+ INIT_LIST_HEAD(&mhi_dev_drv->head);
+ mutex_unlock(&mhi_dev_drv->lock);
+ mhi_dev_drv->mhi_bhi_class = class_create(THIS_MODULE, "bhi");
+ if (IS_ERR(mhi_dev_drv->mhi_bhi_class)) {
+ pr_err("Error creating mhi_bhi_class\n");
+ goto class_error;
+ }
+ mhi_dev_drv->parent = debugfs_create_dir("mhi", NULL);
+ mhi_device_drv = mhi_dev_drv;
- mhi_log(MHI_MSG_INFO, "Entered\n");
r = platform_driver_register(&mhi_plat_driver);
if (r) {
- mhi_log(MHI_MSG_INFO, "Failed to probe platform ret %d\n", r);
- return r;
+ pr_err("%s: Failed to probe platform ret %d\n", __func__, r);
+ goto platform_error;
}
r = pci_register_driver(&mhi_pcie_driver);
if (r) {
- mhi_log(MHI_MSG_INFO,
- "Failed to register pcie drv ret %d\n", r);
+ pr_err("%s: Failed to register pcie drv ret %d\n", __func__, r);
goto error;
}
- mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0);
- if (!mhi_ipc_log) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to create IPC logging context\n");
- }
- mhi_log(MHI_MSG_INFO, "Exited\n");
+
return 0;
error:
- pci_unregister_driver(&mhi_pcie_driver);
+ platform_driver_unregister(&mhi_plat_driver);
+platform_error:
+ class_destroy(mhi_device_drv->mhi_bhi_class);
+
+class_error:
+ kfree(mhi_dev_drv);
+ mhi_device_drv = NULL;
return r;
}
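
Editor's note: this change inverts the probe flow in mhi_iface.c. mhi_plat_probe() now allocates the context from DT and appends it to the driver-wide list mhi_device_drv->head, and mhi_pci_probe() later matches the enumerated PCI function against that list by domain/bus/slot/device-id, returning -EPROBE_DEFER if the platform side has not registered yet. A sketch of that lookup with simplified stand-in structures (drv_state and core_id are not the driver's real types):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct core_id {
	u32 domain, bus, slot, dev_id;
	struct list_head node;
};

struct drv_state {
	struct mutex lock;
	struct list_head head;	/* populated by the platform probe */
};

static struct core_id *find_ctxt(struct drv_state *drv,
				 const struct core_id *key)
{
	struct core_id *itr, *found = NULL;

	/* Hold the list lock while walking entries added by the
	 * platform probe path. */
	mutex_lock(&drv->lock);
	list_for_each_entry(itr, &drv->head, node) {
		if (itr->domain == key->domain && itr->bus == key->bus &&
		    itr->slot == key->slot && itr->dev_id == key->dev_id) {
			found = itr;
			break;
		}
	}
	mutex_unlock(&drv->lock);
	return found;	/* caller returns -EPROBE_DEFER when NULL */
}

Returning -EPROBE_DEFER on a miss is the design choice that decouples PCI enumeration order from platform-device registration order: the PCI core simply retries the probe later.
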
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index a496c81239bf..e6bf75d9d7e7 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -53,12 +53,12 @@ size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
(NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt)) +
(mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt));
- mhi_log(MHI_MSG_INFO, "Reserved %zd bytes for context info\n",
- mhi_dev_mem);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reserved %zd bytes for context info\n", mhi_dev_mem);
/*Calculate size needed for cmd TREs */
mhi_dev_mem += (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
- mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n",
- mhi_dev_mem);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Final bytes for MHI device space %zd\n", mhi_dev_mem);
return mhi_dev_mem;
}
@@ -105,23 +105,6 @@ void init_local_chan_ctxt(struct mhi_ring *chan_ctxt,
chan_ctxt->overwrite_en = 0;
}
-int populate_bb_list(struct list_head *bb_list, int num_bb)
-{
- struct mhi_buf_info *mhi_buf = NULL;
- int i;
-
- for (i = 0; i < num_bb; ++i) {
- mhi_buf = kzalloc(sizeof(struct mhi_buf_info), GFP_KERNEL);
- if (!mhi_buf)
- return -ENOMEM;
- mhi_buf->bb_p_addr = 0;
- mhi_buf->bb_v_addr = NULL;
- mhi_log(MHI_MSG_INFO,
- "Allocated BB v_addr 0x%p, p_addr 0x%llx\n",
- mhi_buf->bb_v_addr, (u64)mhi_buf->bb_p_addr);
- }
- return 0;
-}
/**
* mhi_cmd_ring_init- Initialization of the command ring
*
@@ -153,91 +136,6 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
return 0;
}
-/*
- * The device can have severe addressing limitations, and in this case
- * the MHI driver may be restricted on where memory can be allocated.
- *
- * The allocation of the MHI control data structures takes place as one
- * big, physically contiguous allocation.
- * The device's addressing window, must be placed around that control segment
- * allocation.
- * Here we attempt to do this by building an addressing window around the
- * initial allocated control segment.
- *
- * The window size is specified by the device and must be contiguous,
- * but depending on where the control segment was allocated, it may be
- * necessary to leave more room, before the ctrl segment start or after
- * the ctrl segment end.
- * The following assumptions are made:
- * Assumption: 1. size of allocated ctrl seg << (device allocation window / 2)
- * 2. allocated ctrl seg is physically contiguous
- */
-static int calculate_mhi_addressing_window(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- u64 dma_dev_mem_start = 0;
- u64 dma_seg_size = 0;
- u64 dma_max_addr = (dma_addr_t)(-1);
- u64 dev_address_limit = 0;
- int r = 0;
- const struct device_node *np =
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node;
-
- dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
- r = of_property_read_u64(np, "mhi-dev-address-win-size",
- &dev_address_limit);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to get device addressing limit ret %d",
- r);
- return r;
- }
- /* Mask off the last 3 bits for address calculation */
- dev_address_limit &= ~0x7;
- mhi_log(MHI_MSG_INFO, "Device Addressing limit 0x%llx\n",
- dev_address_limit);
- dma_seg_size = dev_address_limit / 2;
-
- /*
- * The region of the allocated control segment is within the
- * first half of the device's addressing limit
- */
- if (dma_dev_mem_start < dma_seg_size) {
- mhi_dev_ctxt->dev_space.start_win_addr = 0;
- mhi_dev_ctxt->dev_space.end_win_addr =
- dma_dev_mem_start + dma_seg_size +
- (dma_seg_size - dma_dev_mem_start);
- } else if (dma_dev_mem_start >= dma_seg_size &&
- dma_dev_mem_start <= (dma_max_addr - dma_seg_size)) {
- /*
- * The start of the control segment is located past
- * halfway point of the device's addressing limit
- * Place the control segment in the middle of the device's
- * addressing range
- */
- mhi_dev_ctxt->dev_space.start_win_addr =
- dma_dev_mem_start - dma_seg_size;
- mhi_dev_ctxt->dev_space.end_win_addr =
- dma_dev_mem_start + dma_seg_size;
- } else if (dma_dev_mem_start > (dma_max_addr - dma_seg_size)) {
- /*
- * The start of the control segment is located at the tail end
- * of the host addressing space. Leave extra addressing space
- * at window start
- */
- mhi_dev_ctxt->dev_space.start_win_addr = dma_dev_mem_start;
- mhi_dev_ctxt->dev_space.start_win_addr -=
- dma_seg_size + (dma_seg_size -
- (dma_max_addr - dma_dev_mem_start));
- mhi_dev_ctxt->dev_space.end_win_addr = dma_max_addr;
- }
- mhi_log(MHI_MSG_INFO,
- "MHI start address at 0x%llx, Window Start 0x%llx Window End 0x%llx\n",
- (u64)dma_dev_mem_start,
- (u64)mhi_dev_ctxt->dev_space.start_win_addr,
- (u64)mhi_dev_ctxt->dev_space.end_win_addr);
- return 0;
-}
-
int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
{
size_t mhi_mem_index = 0, ring_len;
@@ -249,12 +147,12 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
calculate_mhi_space(mhi_dev_ctxt);
mhi_dev_ctxt->dev_space.dev_mem_start =
- dma_alloc_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_alloc_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
&mhi_dev_ctxt->dev_space.dma_dev_mem_start,
GFP_KERNEL);
if (!mhi_dev_ctxt->dev_space.dev_mem_start) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to allocate memory of size %zd bytes\n",
mhi_dev_ctxt->dev_space.dev_mem_len);
return -ENOMEM;
@@ -263,26 +161,20 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
memset(dev_mem_start, 0, mhi_dev_ctxt->dev_space.dev_mem_len);
- r = calculate_mhi_addressing_window(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to calculate addressing window ret %d", r);
- return r;
- }
-
- mhi_log(MHI_MSG_INFO, "Starting Seg address: virt 0x%p, dma 0x%llx\n",
- dev_mem_start, (u64)dma_dev_mem_start);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Starting Seg address: virt 0x%p, dma 0x%llx\n",
+ dev_mem_start, (u64)dma_dev_mem_start);
- mhi_log(MHI_MSG_INFO, "Initializing CCABAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CCABAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cc_list = dev_mem_start;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list = dma_dev_mem_start;
mhi_mem_index += MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt);
- mhi_log(MHI_MSG_INFO, "Initializing CRCBAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CRCBAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt =
dev_mem_start + mhi_mem_index;
@@ -290,9 +182,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
dma_dev_mem_start + mhi_mem_index;
mhi_mem_index += NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt);
- mhi_log(MHI_MSG_INFO, "Initializing ECABAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing ECABAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list =
dev_mem_start + mhi_mem_index;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list =
@@ -300,10 +192,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_mem_index += mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt);
- mhi_log(MHI_MSG_INFO,
- "Initializing CMD context at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CMD context at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
/* TODO: Initialize both the local and device cmd context */
ring_len = (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
@@ -322,7 +213,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
ring_len = sizeof(union mhi_event_pkt) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc;
ring_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
ring_len, &ring_dma_addr, GFP_KERNEL);
if (!ring_addr)
goto err_ev_alloc;
@@ -330,9 +221,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
ring_dma_addr, ring_len);
init_local_ev_ctxt(&mhi_dev_ctxt->mhi_local_event_ctxt[i],
ring_addr, ring_len);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Initializing EV_%d TRE list at virt 0x%p dma 0x%llx\n",
- i, ring_addr, (u64)ring_dma_addr);
+ i, ring_addr, (u64)ring_dma_addr);
}
return 0;
@@ -344,12 +235,12 @@ err_ev_alloc:
dev_ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
ev_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
ev_ctxt->len,
ev_ctxt->base,
dev_ev_ctxt->mhi_event_ring_base_addr);
}
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -363,34 +254,34 @@ static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
return -ENOMEM;
}
mhi_dev_ctxt->mhi_ev_wq.state_change_event =
kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.state_change_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_event_handle_alloc;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.m0_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.m0_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_state_change_event_handle;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.m3_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.m3_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_m0_event;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.bhi_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.bhi_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_bhi_event;
}
/* Initialize the event which starts the event parsing thread */
@@ -468,57 +359,49 @@ static int mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
* All threads, events mutexes, mhi specific data structures
* are initialized here
*
- * @param dev_info [IN ] pcie struct device information structure to
- which this mhi context belongs
* @param mhi_struct device [IN/OUT] reference to a mhi context to be populated
*
* @return errno
*/
-int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
- struct mhi_device_ctxt *mhi_dev_ctxt)
+int mhi_init_device_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
- if (NULL == dev_info || NULL == mhi_dev_ctxt)
- return -EINVAL;
-
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
-
- mhi_dev_ctxt->dev_info = dev_info;
- mhi_dev_ctxt->dev_props = &dev_info->core;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
r = mhi_populate_event_cfg(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get event ring properties ret %d\n", r);
goto error_during_props;
}
r = mhi_init_sync(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to initialize mhi sync\n");
goto error_during_sync;
}
r = create_local_ev_ctxt(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize local event ctxt ret %d\n", r);
goto error_during_local_ev_ctxt;
}
r = init_mhi_dev_mem(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize device memory ret %d\n", r);
goto error_during_dev_mem_init;
}
r = mhi_init_events(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize mhi events ret %d\n", r);
goto error_wq_init;
}
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize work queues ret %d\n", r);
goto error_during_thread_init;
}
@@ -527,7 +410,8 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR, "Failed to spawn threads ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to spawn threads ret %d\n", r);
goto error_during_thread_spawn;
}
mhi_init_wakelock(mhi_dev_ctxt);
@@ -543,7 +427,7 @@ error_during_thread_init:
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
error_wq_init:
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -623,7 +507,8 @@ int mhi_reset_all_thread_queues(
ret_val = mhi_init_state_change_thread_work_queue(
&mhi_dev_ctxt->state_change_work_item_list);
if (ret_val)
- mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to reset STT work queue\n");
return ret_val;
}
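
Editor's note: init_mhi_dev_mem() above allocates the channel, command, and event context arrays out of a single dma_alloc_coherent() region (now against the platform device rather than the PCI device), advancing a byte index so each section's virtual and DMA addresses stay in step. A sketch of the carve-up arithmetic with generic names and placeholder section lengths:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct carved_mem {
	void *chan_ctxt;		/* e.g. the CCABAP section */
	dma_addr_t chan_ctxt_dma;
	void *cmd_ctxt;			/* e.g. the CRCBAP section */
	dma_addr_t cmd_ctxt_dma;
};

static int carve_dev_mem(struct device *dev, size_t chan_len,
			 size_t cmd_len, struct carved_mem *out)
{
	size_t off = 0;
	dma_addr_t dma;
	void *virt = dma_alloc_coherent(dev, chan_len + cmd_len,
					&dma, GFP_KERNEL);

	if (!virt)
		return -ENOMEM;

	/* Each section is addressed by the same running offset in
	 * both the CPU and DMA views of the region. */
	out->chan_ctxt = virt + off;
	out->chan_ctxt_dma = dma + off;
	off += chan_len;

	out->cmd_ctxt = virt + off;
	out->cmd_ctxt_dma = dma + off;
	return 0;
}
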
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index d7c604419593..7c83c3f2b5e0 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -29,13 +29,14 @@ static int mhi_process_event_ring(
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "enter ev_index:%u\n", ev_index);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Invalid MHI PM State\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EIO;
}
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
@@ -77,10 +78,9 @@ static int mhi_process_event_ring(
&cmd_pkt, ev_index);
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"MHI CCE received ring 0x%x chan:%u\n",
- ev_index,
- chan);
+ ev_index, chan);
spin_lock_irqsave(&cfg->event_lock, flags);
cfg->cmd_pkt = *cmd_pkt;
cfg->cmd_event_pkt =
@@ -102,9 +102,8 @@ static int mhi_process_event_ring(
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
if (unlikely(!VALID_CHAN_NR(chan))) {
- mhi_log(MHI_MSG_ERROR,
- "Invalid chan:%d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid chan:%d\n", chan);
break;
}
ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
@@ -122,10 +121,9 @@ static int mhi_process_event_ring(
enum STATE_TRANSITION new_state;
unsigned long flags;
new_state = MHI_READ_STATE(&event_to_process);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"MHI STE received ring 0x%x State:%s\n",
- ev_index,
- state_transition_str(new_state));
+ ev_index, state_transition_str(new_state));
/* If transitioning to M1 schedule worker thread */
if (new_state == STATE_TRANSITION_M1) {
@@ -152,9 +150,8 @@ static int mhi_process_event_ring(
{
enum STATE_TRANSITION new_state;
- mhi_log(MHI_MSG_INFO,
- "MHI EEE received ring 0x%x\n",
- ev_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI EEE received ring 0x%x\n", ev_index);
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
switch (MHI_READ_EXEC_ENV(&event_to_process)) {
@@ -172,17 +169,17 @@ static int mhi_process_event_ring(
break;
}
case MHI_PKT_TYPE_STALE_EVENT:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Stale Event received for chan:%u\n",
MHI_EV_READ_CHID(EV_CHID, local_rp));
break;
case MHI_PKT_TYPE_SYS_ERR_EVENT:
- mhi_log(MHI_MSG_INFO,
- "MHI System Error Detected. Triggering Reset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected. Triggering Reset\n");
BUG();
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
MHI_TRB_READ_INFO(EV_TRB_TYPE,
&event_to_process));
@@ -200,8 +197,9 @@ static int mhi_process_event_ring(
--event_quota;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "exit ev_index:%u\n", ev_index);
return ret_val;
}
@@ -230,7 +228,7 @@ int parse_event_thread(void *ctxt)
return 0;
default:
if (mhi_dev_ctxt->flags.kill_threads) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught exit signal, quitting\n");
return 0;
}
@@ -240,12 +238,13 @@ int parse_event_thread(void *ctxt)
}
break;
}
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "awake\n");
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
atomic_dec(&mhi_dev_ctxt->counters.events_pending);
for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
- mhi_log(MHI_MSG_INFO,
- "SYS_ERR detected, not processing events\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "SYS_ERR detected, not processing events\n");
atomic_set(&mhi_dev_ctxt->
counters.events_pending,
0);
@@ -262,6 +261,7 @@ int parse_event_thread(void *ctxt)
atomic_inc(ev_pen_ptr);
}
}
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "sleep\n");
}
}
@@ -273,65 +273,65 @@ void mhi_ctrl_ev_task(unsigned long data)
struct mhi_event_ring_cfg *ring_props =
&mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
- mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
/* Process control event ring */
mhi_process_event_ring(mhi_dev_ctxt,
CTRL_EV_RING,
ring_props->nr_desc);
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
- mhi_log(MHI_MSG_VERBOSE, "Exit\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
+
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
{
int ret_val;
+ struct mhi_client_config *client_config = client_handle->client_config;
- client_handle->result.buf_addr = NULL;
- client_handle->result.bytes_xferd = 0;
- client_handle->result.transaction_status = 0;
- ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
- client_handle->event_ring_index,
+ client_config->result.buf_addr = NULL;
+ client_config->result.bytes_xferd = 0;
+ client_config->result.transaction_status = 0;
+ ret_val = mhi_process_event_ring(client_config->mhi_dev_ctxt,
+ client_config->event_ring_index,
1);
if (ret_val)
- mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
- return &(client_handle->result);
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "NAPI failed to process event ring\n");
+ return &(client_config->result);
}
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config = client_handle->client_config;
struct mhi_device_ctxt *mhi_dev_ctxt =
- client_handle->mhi_dev_ctxt;
+ client_config->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
- mhi_local_event_ctxt[client_handle->event_ring_index];
+ mhi_local_event_ctxt[client_config->event_ring_index];
- disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_config->msi_vec));
ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config = client_handle->client_config;
struct mhi_device_ctxt *mhi_dev_ctxt =
- client_handle->mhi_dev_ctxt;
+ client_config->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
- mhi_local_event_ctxt[client_handle->event_ring_index];
+ mhi_local_event_ctxt[client_config->event_ring_index];
ev_ring->msi_enable_cntr++;
- enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_config->msi_vec));
}
irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
{
- struct device *mhi_device = dev_id;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_id;
int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
- if (!mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
- return IRQ_HANDLED;
- }
mhi_dev_ctxt->counters.msi_counter[
IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
if (msi) {
@@ -347,31 +347,36 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
{
- struct device *mhi_device = dev_id;
- u32 client_index;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_id;
+ struct mhi_event_ring_cfg *ev_ring_props;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
struct mhi_client_info_t *client_info;
struct mhi_cb_info cb_info;
int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
mhi_dev_ctxt->counters.msi_counter[msi_num]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
trace_mhi_msi(msi_num);
- client_index = MHI_MAX_CHANNELS -
- (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
- client_handle = mhi_dev_ctxt->client_handle_list[client_index];
- client_info = &client_handle->client_info;
- if (likely(client_handle)) {
- client_handle->result.user_data =
- client_handle->user_data;
- if (likely(client_info->mhi_client_cb)) {
- cb_info.result = &client_handle->result;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.chan = client_handle->chan_info.chan_nr;
- cb_info.result->transaction_status = 0;
- client_info->mhi_client_cb(&cb_info);
- }
+
+ /* Obtain client config from MSI */
+ ev_ring_props = &mhi_dev_ctxt->ev_ring_props[msi_num];
+ client_handle = mhi_dev_ctxt->client_handle_list[ev_ring_props->chan];
+ if (unlikely(!client_handle)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Recv MSI for unreg chan:%u\n", ev_ring_props->chan);
+ return IRQ_HANDLED;
}
+
+ client_config = client_handle->client_config;
+ client_info = &client_config->client_info;
+ client_config->result.user_data =
+ client_config->user_data;
+ cb_info.result = &client_config->result;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.chan = client_config->chan_info.chan_nr;
+ cb_info.result->transaction_status = 0;
+ client_info->mhi_client_cb(&cb_info);
+
return IRQ_HANDLED;
}
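
The rewrite drops the old client_index arithmetic: the MSI number now
indexes the event-ring properties table, whose chan field names the
channel and thus the registered client. A self-contained sketch of that
two-step lookup (demo_* types are stand-ins for the driver structs):

	struct demo_ring_cfg { unsigned int chan; };
	struct demo_ctxt {
		struct demo_ring_cfg ev_ring_props[16];
		void *client_handle_list[256];
	};

	static void *demo_msi_to_client(struct demo_ctxt *ctxt, int msi)
	{
		unsigned int chan = ctxt->ev_ring_props[msi].chan;

		return ctxt->client_handle_list[chan]; /* NULL if unregistered */
	}
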
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index 133c0eeb034e..fc0e6f4bc27d 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -39,7 +39,6 @@
#define MHI_WORK_Q_MAX_SIZE 128
#define MAX_XFER_WORK_ITEMS 100
-#define MHI_MAX_SUPPORTED_DEVICES 1
#define MHI_PCIE_VENDOR_ID 0x17CB
#define MHI_PCIE_DEVICE_ID_9x35 0x0300
@@ -70,9 +69,9 @@
((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) < MHI_CLIENT_RESERVED_1_LOWER))
#define IRQ_TO_MSI(_MHI_DEV_CTXT, _IRQ_NR) \
- ((_IRQ_NR) - (_MHI_DEV_CTXT)->dev_info->core.irq_base)
+ ((_IRQ_NR) - (_MHI_DEV_CTXT)->core.irq_base)
#define MSI_TO_IRQ(_MHI_DEV_CTXT, _MSI_NR) \
- ((_MHI_DEV_CTXT)->dev_info->core.irq_base + (_MSI_NR))
+ ((_MHI_DEV_CTXT)->core.irq_base + (_MSI_NR))
#define VALID_CHAN_NR(_CHAN_NR) (IS_HARDWARE_CHANNEL(_CHAN_NR) || \
IS_SOFTWARE_CHANNEL(_CHAN_NR))
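
With the mhi_pcie_dev_info wrapper gone, the MSI/IRQ macros reduce to
plain offset arithmetic around core.irq_base. For a hypothetical
irq_base of 100, MSI 0 maps to IRQ 100 and back:

	#define DEMO_IRQ_TO_MSI(base, irq)	((irq) - (base))
	#define DEMO_MSI_TO_IRQ(base, msi)	((base) + (msi))
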
@@ -84,8 +83,8 @@
#define MHI_HW_INTMOD_VAL_MS 2
/* Timeout Values */
-#define MHI_READY_STATUS_TIMEOUT_MS 50
-#define MHI_THREAD_SLEEP_TIMEOUT_MS 20
+#define MHI_READY_STATUS_TIMEOUT_MS 500
+#define MHI_THREAD_SLEEP_TIMEOUT_MS 100
#define MHI_RESUME_WAKE_RETRIES 20
#define IS_HW_EV_RING(_mhi_dev_ctxt, _EV_INDEX) (_EV_INDEX >= \
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 430dc918af7e..6f6658b9602c 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -66,7 +66,7 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_trb_write_ptr = val;
} else if (mhi_dev_ctxt->mmio_info.event_db_addr == io_addr) {
if (chan < mhi_dev_ctxt->mmio_info.nr_event_rings) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"EV ctxt: %ld val 0x%llx WP 0x%llx RP: 0x%llx",
chan, val,
mhi_dev_ctxt->dev_space.ring_ctxt.
@@ -76,7 +76,7 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[chan].
mhi_event_write_ptr = val;
} else {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Bad EV ring index: %lx\n", chan);
}
}
@@ -84,65 +84,48 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
wmb();
}
-int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int mhi_init_pcie_device(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
long int sleep_time = 100;
- struct pci_dev *pcie_device =
- (struct pci_dev *)mhi_pcie_dev->pcie_device;
+ struct pci_dev *pcie_device = mhi_dev_ctxt->pcie_device;
+ struct pcie_core_info *core = &mhi_dev_ctxt->core;
do {
- ret_val = pci_enable_device(mhi_pcie_dev->pcie_device);
+ ret_val = pci_enable_device(pcie_device);
if (0 != ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to enable pcie struct device r: %d\n",
ret_val);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Sleeping for ~ %li uS, and retrying.\n",
sleep_time);
msleep(sleep_time);
}
} while (ret_val != 0);
- mhi_log(MHI_MSG_INFO, "Successfully enabled pcie device.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Successfully enabled pcie device.\n");
- mhi_pcie_dev->core.bar0_base =
- ioremap_nocache(pci_resource_start(pcie_device, 0),
- pci_resource_len(pcie_device, 0));
- if (!mhi_pcie_dev->core.bar0_base)
+ core->bar0_base = ioremap_nocache(pci_resource_start(pcie_device, 0),
+ pci_resource_len(pcie_device, 0));
+ if (!core->bar0_base)
goto mhi_device_list_error;
- mhi_pcie_dev->core.bar0_end = mhi_pcie_dev->core.bar0_base +
- pci_resource_len(pcie_device, 0);
- mhi_pcie_dev->core.bar2_base =
- ioremap_nocache(pci_resource_start(pcie_device, 2),
- pci_resource_len(pcie_device, 2));
- if (!mhi_pcie_dev->core.bar2_base)
- goto io_map_err;
-
- mhi_pcie_dev->core.bar2_end = mhi_pcie_dev->core.bar2_base +
- pci_resource_len(pcie_device, 2);
-
- if (!mhi_pcie_dev->core.bar0_base) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register for pcie resources\n");
- goto mhi_pcie_read_ep_config_err;
- }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device BAR0 address is at 0x%p\n", core->bar0_base);
- mhi_log(MHI_MSG_INFO, "Device BAR0 address is at 0x%p\n",
- mhi_pcie_dev->core.bar0_base);
ret_val = pci_request_region(pcie_device, 0, "mhi");
if (ret_val)
- mhi_log(MHI_MSG_ERROR, "Could not request BAR0 region\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not request BAR0 region\n");
- mhi_pcie_dev->core.manufact_id = pcie_device->vendor;
- mhi_pcie_dev->core.dev_id = pcie_device->device;
+ core->manufact_id = pcie_device->vendor;
+ core->dev_id = pcie_device->device;
return 0;
-io_map_err:
- iounmap((void *)mhi_pcie_dev->core.bar0_base);
+
mhi_device_list_error:
pci_disable_device(pcie_device);
-mhi_pcie_read_ep_config_err:
return -EIO;
}
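
With the BAR2 handling removed, bring-up is simply enable, request, and
map BAR0. A minimal sketch with compressed error unwinding
(demo_map_bar0 is hypothetical; ioremap_nocache matches this kernel):

	#include <linux/pci.h>
	#include <linux/io.h>

	static void __iomem *demo_map_bar0(struct pci_dev *pdev)
	{
		void __iomem *base;

		if (pci_enable_device(pdev))
			return NULL;
		if (pci_request_region(pdev, 0, "mhi"))
			goto err_disable;
		base = ioremap_nocache(pci_resource_start(pdev, 0),
				       pci_resource_len(pdev, 0));
		if (!base)
			goto err_release;
		return base;

	err_release:
		pci_release_region(pdev, 0);
	err_disable:
		pci_disable_device(pdev);
		return NULL;
	}
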
@@ -156,7 +139,7 @@ static void mhi_move_interrupts(struct mhi_device_ctxt *mhi_dev_ctxt, u32 cpu)
GET_EV_PROPS(EV_TYPE,
mhi_dev_ctxt->ev_ring_props[i].flags)) {
irq_to_affin = mhi_dev_ctxt->ev_ring_props[i].msi_vec;
- irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
+ irq_to_affin += mhi_dev_ctxt->core.irq_base;
irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));
}
}
@@ -198,8 +181,9 @@ int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-chan-cfg-", chan);
r = of_property_read_u32_array(
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node,
- dt_prop, (u32 *)chan_info,
+ mhi_dev_ctxt->plat_dev->dev.of_node,
+ dt_prop,
+ (u32 *)chan_info,
sizeof(struct mhi_chan_info) / sizeof(u32));
return r;
}
@@ -211,9 +195,10 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
if (cc_list == NULL || ring == NULL)
return -EINVAL;
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
- ring->len, ring->base,
- cc_list->mhi_trb_ring_base_addr);
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ ring->len,
+ ring->base,
+ cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
MHI_CHAN_STATE_DISABLED,
false,
@@ -221,38 +206,39 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
-void free_tre_ring(struct mhi_client_handle *client_handle)
+void free_tre_ring(struct mhi_client_config *client_config)
{
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- int chan = client_handle->chan_info.chan_nr;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ int chan = client_config->chan_info.chan_nr;
int r;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
r = mhi_release_chan_ctxt(mhi_dev_ctxt, chan_ctxt,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan]);
if (r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to release chan %d ret %d\n", chan, r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to release chan %d ret %d\n", chan, r);
}
-static int populate_tre_ring(struct mhi_client_handle *client_handle)
+static int populate_tre_ring(struct mhi_client_config *client_config)
{
dma_addr_t ring_dma_addr;
void *ring_local_addr;
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- u32 chan = client_handle->chan_info.chan_nr;
- u32 nr_desc = client_handle->chan_info.max_desc;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ u32 chan = client_config->chan_info.chan_nr;
+ u32 nr_desc = client_config->chan_info.max_desc;
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered chan %d requested desc %d\n", chan, nr_desc);
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- ring_local_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- nr_desc * sizeof(union mhi_xfer_pkt),
- &ring_dma_addr, GFP_KERNEL);
+ ring_local_addr =
+ dma_alloc_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ nr_desc * sizeof(union mhi_xfer_pkt),
+ &ring_dma_addr,
+ GFP_KERNEL);
if (ring_local_addr == NULL)
return -ENOMEM;
@@ -261,15 +247,15 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
(uintptr_t)ring_local_addr,
nr_desc,
GET_CHAN_PROPS(CHAN_DIR,
- client_handle->chan_info.flags),
- client_handle->chan_info.ev_ring,
+ client_config->chan_info.flags),
+ client_config->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
MHI_CHAN_STATE_ENABLED,
GET_CHAN_PROPS(PRESERVE_DB_STATE,
- client_handle->chan_info.flags),
+ client_config->chan_info.flags),
GET_CHAN_PROPS(BRSTMODE,
- client_handle->chan_info.flags));
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ client_config->chan_info.flags));
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
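
The ring is one coherent DMA allocation sized by the requested
descriptor count; the CPU address feeds the local ring context and the
DMA address the device-visible context. A sketch under that assumption
(demo_tre stands in for union mhi_xfer_pkt):

	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	struct demo_tre { u64 ptr; u32 len; u32 info; };	/* 16-byte TRE */

	static void *demo_alloc_ring(struct device *dev, u32 nr_desc,
				     dma_addr_t *dma_addr)
	{
		return dma_alloc_coherent(dev,
					  nr_desc * sizeof(struct demo_tre),
					  dma_addr, GFP_KERNEL);
	}
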
@@ -283,85 +269,84 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
struct mhi_cmd_complete_event_pkt cmd_event_pkt;
union mhi_cmd_pkt cmd_pkt;
enum MHI_EVENT_CCS ev_code;
+ struct mhi_client_config *client_config = client_handle->client_config;
- if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
+ if (client_config->magic != MHI_HANDLE_MAGIC)
return -EINVAL;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
ret_val = get_chan_props(mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- &client_handle->chan_info);
+ client_config->chan_info.chan_nr,
+ &client_config->chan_info);
if (ret_val)
return ret_val;
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mutex_lock(&cfg->chan_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered: Client opening chan 0x%x\n", chan);
if (mhi_dev_ctxt->dev_exec_env <
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
- client_handle->chan_info.flags)) {
- mhi_log(MHI_MSG_INFO,
+ client_config->chan_info.flags)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan %d, MHI exec_env %d, not ready!\n",
- chan,
- mhi_dev_ctxt->dev_exec_env);
+ chan, mhi_dev_ctxt->dev_exec_env);
mutex_unlock(&cfg->chan_lock);
return -ENOTCONN;
}
- ret_val = populate_tre_ring(client_handle);
+ ret_val = populate_tre_ring(client_config);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
- chan,
- ret_val);
+ chan, ret_val);
mutex_unlock(&cfg->chan_lock);
return ret_val;
}
- client_handle->event_ring_index =
- mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
- mhi_event_ring_index;
+ client_config->event_ring_index =
+ mhi_dev_ctxt->dev_space.ring_ctxt.
+ cc_list[chan].mhi_event_ring_index;
ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
- client_handle->chan_info.max_desc);
+ client_config->chan_info.max_desc);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize bb ctxt chan %d ret %d\n",
- chan,
- ret_val);
+ chan, ret_val);
mutex_unlock(&cfg->chan_lock);
return ret_val;
}
- client_handle->msi_vec =
+ client_config->msi_vec =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
- client_handle->event_ring_index].mhi_msi_vector;
- client_handle->intmod_t =
+ client_config->event_ring_index].mhi_msi_vector;
+ client_config->intmod_t =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
- client_handle->event_ring_index].mhi_intmodt;
+ client_config->event_ring_index].mhi_intmodt;
init_completion(&cfg->cmd_complete);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI State is disabled\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mutex_unlock(&cfg->chan_lock);
return -EIO;
}
+
WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
spin_lock_irq(&chan_ring->ring_lock);
chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
spin_unlock_irq(&chan_ring->ring_lock);
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ ret_val = mhi_send_cmd(client_config->mhi_dev_ctxt,
MHI_COMMAND_START_CHAN,
chan);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to send start cmd for chan %d ret %d\n",
chan, ret_val);
goto error_completion;
@@ -369,9 +354,9 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
if (!ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to receive cmd completion for %d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n", chan);
+ ret_val = -EIO;
goto error_completion;
} else {
ret_val = 0;
@@ -385,76 +370,133 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
if (ev_code != MHI_EVENT_CC_SUCCESS) {
- mhi_log(MHI_MSG_ERROR,
- "Error to receive event completion ev_code:0x%x\n",
- ev_code);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Error receiving event comp. ev_code:0x%x\n", ev_code);
ret_val = -EIO;
goto error_completion;
}
- client_handle->chan_status = 1;
+ client_config->chan_status = 1;
error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mutex_unlock(&cfg->chan_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
}
EXPORT_SYMBOL(mhi_open_channel);
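
Both the start and reset commands follow the same synchronous shape:
re-arm the completion, send, then wait with a bounded timeout. A sketch
of that pattern (demo_send stands in for mhi_send_cmd):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static int demo_send(void);

	static int demo_cmd_sync(struct completion *done,
				 unsigned int timeout_ms)
	{
		init_completion(done);	/* re-arm before sending */
		if (demo_send())
			return -EIO;
		if (!wait_for_completion_timeout(done,
					msecs_to_jiffies(timeout_ms)))
			return -EIO;	/* zero return means timeout */
		return 0;
	}
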
+bool mhi_is_device_ready(const struct device * const dev,
+ const char *node_name)
+{
+ struct mhi_device_ctxt *itr;
+ const struct device_node *of_node;
+ bool match_found = false;
+
+ if (!mhi_device_drv)
+ return false;
+ if (dev->of_node == NULL)
+ return false;
+
+ of_node = of_parse_phandle(dev->of_node, node_name, 0);
+ if (!of_node)
+ return false;
+
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct platform_device *pdev = itr->plat_dev;
+
+ if (pdev->dev.of_node == of_node) {
+ match_found = true;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
+ return match_found;
+}
+EXPORT_SYMBOL(mhi_is_device_ready);
+
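
Device lookup is now driven by a device-tree phandle instead of a fixed
device index. A self-contained sketch of the resolve-then-scan step that
both mhi_is_device_ready() and mhi_register_channel() perform (demo_*
names are stand-ins for the driver's list and context types):

	#include <linux/of.h>
	#include <linux/device.h>
	#include <linux/list.h>
	#include <linux/mutex.h>

	struct demo_dev {
		struct list_head node;
		struct device_node *of_node;
	};
	static LIST_HEAD(demo_list);
	static DEFINE_MUTEX(demo_lock);

	static struct demo_dev *demo_find(const struct device *dev,
					  const char *node_name)
	{
		struct device_node *of_node;
		struct demo_dev *itr, *found = NULL;

		of_node = of_parse_phandle(dev->of_node, node_name, 0);
		if (!of_node)
			return NULL;

		mutex_lock(&demo_lock);
		list_for_each_entry(itr, &demo_list, node)
			if (itr->of_node == of_node) {
				found = itr;
				break;
			}
		mutex_unlock(&demo_lock);
		return found;
	}
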
int mhi_register_channel(struct mhi_client_handle **client_handle,
- enum MHI_CLIENT_CHANNEL chan, s32 device_index,
- struct mhi_client_info_t *client_info, void *user_data)
+ struct mhi_client_info_t *client_info)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
+ const struct device_node *of_node;
+ struct mhi_client_config *client_config;
+ const char *node_name;
+ enum MHI_CLIENT_CHANNEL chan;
- if (!VALID_CHAN_NR(chan))
+ if (!client_info || client_info->dev->of_node == NULL)
return -EINVAL;
- if (NULL == client_handle || device_index < 0)
+ node_name = client_info->node_name;
+ chan = client_info->chan;
+ of_node = of_parse_phandle(client_info->dev->of_node, node_name, 0);
+ if (!of_node || !mhi_device_drv || chan >= MHI_MAX_CHANNELS)
return -EINVAL;
- mhi_dev_ctxt = &(mhi_devices.device_list[device_index].mhi_ctxt);
+	/* Traverse through the list */
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct platform_device *pdev = itr->plat_dev;
+
+ if (pdev->dev.of_node == of_node) {
+ mhi_dev_ctxt = itr;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- return -EISCONN;
+ if (!mhi_dev_ctxt)
+ return -EINVAL;
- mhi_log(MHI_MSG_INFO,
- "Opened channel 0x%x for client\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Registering channel 0x%x for client\n", chan);
*client_handle = kzalloc(sizeof(struct mhi_client_handle), GFP_KERNEL);
if (NULL == *client_handle)
return -ENOMEM;
+ (*client_handle)->client_config =
+ kzalloc(sizeof(*(*client_handle)->client_config), GFP_KERNEL);
+ if ((*client_handle)->client_config == NULL) {
+ kfree(*client_handle);
+ *client_handle = NULL;
+ return -ENOMEM;
+ }
mhi_dev_ctxt->client_handle_list[chan] = *client_handle;
- (*client_handle)->mhi_dev_ctxt = mhi_dev_ctxt;
- (*client_handle)->user_data = user_data;
- (*client_handle)->magic = MHI_HANDLE_MAGIC;
- (*client_handle)->chan_info.chan_nr = chan;
+ (*client_handle)->dev_id = mhi_dev_ctxt->core.dev_id;
+ (*client_handle)->domain = mhi_dev_ctxt->core.domain;
+ (*client_handle)->bus = mhi_dev_ctxt->core.bus;
+ (*client_handle)->slot = mhi_dev_ctxt->core.slot;
+ client_config = (*client_handle)->client_config;
+ client_config->mhi_dev_ctxt = mhi_dev_ctxt;
+ client_config->user_data = client_info->user_data;
+ client_config->magic = MHI_HANDLE_MAGIC;
+ client_config->chan_info.chan_nr = chan;
if (NULL != client_info)
- (*client_handle)->client_info = *client_info;
+ client_config->client_info = *client_info;
if (MHI_CLIENT_IP_HW_0_OUT == chan)
- (*client_handle)->intmod_t = 10;
+ client_config->intmod_t = 10;
if (MHI_CLIENT_IP_HW_0_IN == chan)
- (*client_handle)->intmod_t = 10;
+ client_config->intmod_t = 10;
- if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS) {
- mhi_log(MHI_MSG_INFO,
- "Exec env is AMSS notifing client now chan: 0x%x\n",
- chan);
+ if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS &&
+ mhi_dev_ctxt->flags.mhi_initialized) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"Exec env is AMSS, notifying client now chan:%u\n", chan);
mhi_notify_client(*client_handle, MHI_CB_MHI_ENABLED);
}
- mhi_log(MHI_MSG_VERBOSE,
- "Successfuly registered chan 0x%x\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+		"Successfully registered chan:%u\n", chan);
return 0;
}
EXPORT_SYMBOL(mhi_register_channel);
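
The handle is now a thin public shell over a private config, so
registration allocates in two steps and must unwind the outer one on
failure, as above. The core of that pattern (demo_* types hypothetical):

	#include <linux/slab.h>

	struct demo_config { int chan; };
	struct demo_handle { struct demo_config *config; };

	static struct demo_handle *demo_alloc_handle(void)
	{
		struct demo_handle *handle = kzalloc(sizeof(*handle),
						     GFP_KERNEL);

		if (!handle)
			return NULL;
		handle->config = kzalloc(sizeof(*handle->config), GFP_KERNEL);
		if (!handle->config) {
			kfree(handle);	/* unwind the outer allocation */
			return NULL;
		}
		return handle;
	}
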
@@ -469,17 +511,20 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
union mhi_cmd_pkt cmd_pkt;
struct mhi_ring *chan_ring;
enum MHI_EVENT_CCS ev_code;
+ struct mhi_client_config *client_config =
+ client_handle->client_config;
- if (!client_handle ||
- client_handle->magic != MHI_HANDLE_MAGIC ||
- !client_handle->chan_status)
+ if (client_config->magic != MHI_HANDLE_MAGIC ||
+ !client_config->chan_status)
return;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
- mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Client attempting to close chan 0x%x\n", chan);
+
chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mutex_lock(&cfg->chan_lock);
@@ -490,25 +535,22 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
init_completion(&cfg->cmd_complete);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_RESET_CHAN,
- chan);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ ret_val = mhi_send_cmd(mhi_dev_ctxt,
+ MHI_COMMAND_RESET_CHAN, chan);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to send reset cmd for chan %d ret %d\n",
- chan,
- ret_val);
+ chan, ret_val);
goto error_completion;
}
ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
if (!ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to receive cmd completion for %d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n", chan);
goto error_completion;
}
@@ -519,7 +561,7 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
if (ev_code != MHI_EVENT_CC_SUCCESS) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error to receive event completion ev_cod:0x%x\n",
ev_code);
goto error_completion;
@@ -527,20 +569,20 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
if (ret_val)
- mhi_log(MHI_MSG_ERROR,
- "Error resetting cmd ret:%d\n",
- ret_val);
-
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error resetting cmd ret:%d\n", ret_val);
error_completion:
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
- free_tre_ring(client_handle);
- mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
- client_handle->chan_status = 0;
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Freeing ring for chan 0x%x\n", chan);
+ free_tre_ring(client_config);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Chan 0x%x confirmed closed.\n", chan);
+ client_config->chan_status = 0;
mutex_unlock(&cfg->chan_lock);
}
EXPORT_SYMBOL(mhi_close_channel);
@@ -606,7 +648,8 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int r;
uintptr_t bb_index, ctxt_index_wp, ctxt_index_rp;
- mhi_log(MHI_MSG_RAW, "Entered chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "Entered chan %d\n", chan);
get_element_index(bb_ctxt, bb_ctxt->wp, &bb_index);
get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
@@ -615,9 +658,9 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp,
&ctxt_index_rp);
BUG_ON(bb_index != ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
- "Chan RP index %ld Chan WP index %ld, chan %d\n",
- ctxt_index_rp, ctxt_index_wp, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Chan RP index %ld Chan WP index %ld, chan %d\n",
+ ctxt_index_rp, ctxt_index_wp, chan);
r = ctxt_add_element(bb_ctxt, (void **)&bb_info);
if (r)
return r;
@@ -626,21 +669,22 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->client_buf = buf;
bb_info->dir = dir;
bb_info->bb_p_addr = dma_map_single(
- &mhi_dev_ctxt->dev_info->plat_dev->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
bb_info->client_buf,
bb_info->buf_len,
bb_info->dir);
if (!VALID_BUF(bb_info->bb_p_addr, bb_info->buf_len, mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Buffer outside DMA range 0x%lx, size 0x%zx\n",
- (uintptr_t)bb_info->bb_p_addr, buf_len);
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ (uintptr_t)bb_info->bb_p_addr, buf_len);
+ dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb_info->bb_p_addr,
bb_info->buf_len,
bb_info->dir);
- mhi_log(MHI_MSG_RAW, "Allocating BB, chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "Allocating BB, chan %d\n", chan);
bb_info->bb_v_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
bb_info->buf_len,
&bb_info->bb_p_addr,
GFP_ATOMIC);
@@ -648,7 +692,8 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
return -ENOMEM;
mhi_dev_ctxt->counters.bb_used[chan]++;
if (dir == DMA_TO_DEVICE) {
- mhi_log(MHI_MSG_INFO, "Copying client buf into BB.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Copying client buf into BB.\n");
memcpy(bb_info->bb_v_addr, buf, bb_info->buf_len);
/* Flush out data to bounce buffer */
wmb();
@@ -656,26 +701,26 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->bb_active = 1;
}
*bb = bb_info;
- mhi_log(MHI_MSG_RAW, "Exited chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited chan %d\n", chan);
return 0;
}
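
create_bb() tries a streaming mapping first and only bounces through a
coherent buffer when the mapped address falls outside the device's
window. A condensed sketch of that map-or-bounce decision (demo_in_range
stands in for VALID_BUF, struct demo_bb for mhi_buf_info):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	struct demo_bb {
		void *client_buf, *bb_v_addr;
		dma_addr_t bb_p_addr;
		size_t buf_len;
		enum dma_data_direction dir;
		int bb_active;
	};

	static bool demo_in_range(dma_addr_t addr, size_t len);

	static int demo_map_or_bounce(struct device *dev, struct demo_bb *bb)
	{
		bb->bb_p_addr = dma_map_single(dev, bb->client_buf,
					       bb->buf_len, bb->dir);
		if (demo_in_range(bb->bb_p_addr, bb->buf_len))
			return 0;	/* streaming mapping is usable */

		dma_unmap_single(dev, bb->bb_p_addr, bb->buf_len, bb->dir);
		bb->bb_v_addr = dma_alloc_coherent(dev, bb->buf_len,
						   &bb->bb_p_addr, GFP_ATOMIC);
		if (!bb->bb_v_addr)
			return -ENOMEM;
		if (bb->dir == DMA_TO_DEVICE)	/* outbound: stage data now */
			memcpy(bb->bb_v_addr, bb->client_buf, bb->buf_len);
		bb->bb_active = 1;
		return 0;
	}
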
static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb)
{
- mhi_log(MHI_MSG_RAW, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Entered\n");
if (!bb->bb_active)
		/* This buffer was mapped directly to the device */
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb->bb_p_addr, bb->buf_len, bb->dir);
else
/* This buffer was bounced */
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
bb->buf_len,
bb->bb_v_addr,
bb->bb_p_addr);
bb->bb_active = 0;
- mhi_log(MHI_MSG_RAW, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -684,7 +729,7 @@ void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int r = 0;
struct mhi_buf_info *bb = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
/*
	Assumption: No events are expected during or after
	this operation for this channel.
@@ -703,11 +748,11 @@ void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_ctxt->ack_rp = bb_ctxt->base;
bb_ctxt->rp = bb_ctxt->base;
bb_ctxt->wp = bb_ctxt->base;
- mhi_log(MHI_MSG_VERBOSE, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exited\n");
}
static int mhi_queue_dma_xfer(
- struct mhi_client_handle *client_handle,
+ struct mhi_client_config *client_config,
dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
{
union mhi_xfer_pkt *pkt_loc;
@@ -715,17 +760,17 @@ static int mhi_queue_dma_xfer(
enum MHI_CLIENT_CHANNEL chan;
struct mhi_device_ctxt *mhi_dev_ctxt;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
pkt_loc->type.info = mhi_flags;
trace_mhi_tre(pkt_loc, chan, 0);
- if (likely(0 != client_handle->intmod_t))
+ if (likely(client_config->intmod_t))
MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1);
else
MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0);
@@ -736,21 +781,21 @@ static int mhi_queue_dma_xfer(
/* Ensure writes to descriptor are flushed */
wmb();
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Channel %d Has buf size of %zd and buf addr %lx, flags 0x%x\n",
- chan, buf_len, (uintptr_t)buf, mhi_flags);
+ chan, buf_len, (uintptr_t)buf, mhi_flags);
/* Add the TRB to the correct transfer ring */
ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
(void *)&pkt_loc);
if (unlikely(0 != ret_val)) {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Failed to insert trb in xfer ring\n");
return ret_val;
}
if (MHI_OUT ==
- GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
+ GET_CHAN_PROPS(CHAN_DIR, client_config->chan_info.flags))
atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
return ret_val;
@@ -765,56 +810,55 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
struct mhi_device_ctxt *mhi_dev_ctxt;
u32 chan;
unsigned long flags;
+ struct mhi_client_config *client_config;
if (!client_handle || !buf || !buf_len)
return -EINVAL;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ client_config = client_handle->client_config;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI is not in active state\n");
return -EINVAL;
}
-
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
+ if (GET_CHAN_PROPS(CHAN_DIR, client_config->chan_info.flags) == MHI_OUT)
dma_dir = DMA_TO_DEVICE;
else
dma_dir = DMA_FROM_DEVICE;
- r = create_bb(client_handle->mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- buf, buf_len, dma_dir, &bb);
+ r = create_bb(client_config->mhi_dev_ctxt,
+ client_config->chan_info.chan_nr,
+ buf,
+ buf_len,
+ dma_dir,
+ &bb);
if (r) {
- mhi_log(MHI_MSG_VERBOSE,
- "Failed to create BB, chan %d ret %d\n",
- chan,
- r);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
- pcie_device->dev);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Failed to create BB, chan %d ret %d\n", chan, r);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return r;
}
- mhi_log(MHI_MSG_VERBOSE,
- "Queueing to HW: Client Buf 0x%p, size 0x%zx, DMA %llx, chan %d\n",
- buf, buf_len, (u64)bb->bb_p_addr,
- client_handle->chan_info.chan_nr);
- r = mhi_queue_dma_xfer(client_handle,
- bb->bb_p_addr,
- bb->buf_len,
- mhi_flags);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Queueing to HW: Client Buf 0x%p, size 0x%zx, DMA %llx, chan %d\n",
+ buf, buf_len, (u64)bb->bb_p_addr,
+ client_config->chan_info.chan_nr);
+ r = mhi_queue_dma_xfer(client_config,
+ bb->bb_p_addr,
+ bb->buf_len,
+ mhi_flags);
/*
* Assumption: If create_bounce_buffer did not fail, we do not
@@ -826,11 +870,8 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
if (dma_dir == DMA_FROM_DEVICE) {
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
}
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return 0;
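
Note the bracketing in the transfer path: both the runtime-PM vote and
the WAKE assertion are taken under the read side of pm_xfer_lock so a
power-state transition cannot slip between them. A sketch of just that
bracket (demo_* hooks stand in for the new callbacks):

	#include <linux/spinlock.h>

	static void demo_runtime_get(void);
	static void demo_assert_wake(void);
	static DEFINE_RWLOCK(demo_pm_lock);

	static void demo_vote_for_xfer(void)
	{
		unsigned long flags;

		read_lock_irqsave(&demo_pm_lock, flags);
		demo_runtime_get();	/* keep the link from suspending */
		demo_assert_wake();	/* hold the device in M0 */
		read_unlock_irqrestore(&demo_pm_lock, flags);

		/* outbound: both votes are dropped when the ack arrives */
	}
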
@@ -848,13 +889,12 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
- mhi_log(MHI_MSG_ERROR,
- "Invalid channel id, received id: 0x%x",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid channel id, received id: 0x%x", chan);
return -EINVAL;
}
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->dev_exec_env, chan, cmd);
@@ -868,14 +908,16 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
break;
default:
- mhi_log(MHI_MSG_ERROR, "Bad command received\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Bad command received\n");
return -EINVAL;
}
spin_lock_irqsave(&mhi_ring->ring_lock, flags);
ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to insert element\n");
spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
return ret_val;
}
@@ -886,13 +928,10 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Sent command 0x%x for chan %d\n", cmd, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
- mhi_log(MHI_MSG_VERBOSE,
- "Sent command 0x%x for chan %d\n",
- cmd,
- chan);
-
- mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
return ret_val;
}
@@ -904,7 +943,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
bb = bb_ctxt->rp;
bb->filled_size = bounced_data_size;
@@ -915,7 +954,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
if (bb->bb_active) {
/* This is coherent memory, no cache management is needed */
memcpy(bb->client_buf, bb->bb_v_addr, bb->filled_size);
- mhi_log(MHI_MSG_RAW,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
"Bounce from BB:0x%p to Client Buf: 0x%p Len 0x%zx\n",
bb->client_buf, bb->bb_v_addr, bb->filled_size);
}
@@ -929,7 +968,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
* rp, since it can be moved async by mhi_poll_inbound
*/
free_bounce_buffer(mhi_dev_ctxt, bb);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
}
static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -940,7 +979,7 @@ static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb;
bb = bb_ctxt->rp;
- mhi_log(MHI_MSG_RAW, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Entered\n");
BUG_ON(bb->dir != DMA_TO_DEVICE);
bb->filled_size = bounced_data_size;
BUG_ON(bb->filled_size != bb->buf_len);
@@ -948,7 +987,7 @@ static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->bytes_xferd = bb->filled_size;
result->transaction_status = 0;
free_bounce_buffer(mhi_dev_ctxt, bb);
- mhi_log(MHI_MSG_RAW, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -957,42 +996,44 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_result *result = NULL;
int ret_val = 0;
struct mhi_client_handle *client_handle = NULL;
+ struct mhi_client_config *client_config;
struct mhi_ring *local_chan_ctxt = NULL;
struct mhi_cb_info cb_info;
struct mhi_ring *bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
client_handle = mhi_dev_ctxt->client_handle_list[chan];
+ client_config = client_handle->client_config;
/* If ring is empty */
MHI_ASSERT(!unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp), "Empty Event Ring\n");
parse_outbound_bb(mhi_dev_ctxt, bb_ctxt,
- &client_handle->result, xfer_len);
+ &client_config->result, xfer_len);
- mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "Removing BB from head, chan %d\n", chan);
atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
- mhi_deassert_device_wake(mhi_dev_ctxt);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
NULL);
BUG_ON(ret_val);
ret_val = ctxt_del_element(bb_ctxt, NULL);
BUG_ON(ret_val);
- if (NULL != client_handle) {
- result = &mhi_dev_ctxt->client_handle_list[chan]->result;
- if (NULL != (&client_handle->client_info.mhi_client_cb)) {
- client_handle->result.user_data =
- client_handle->user_data;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.result = &client_handle->result;
- cb_info.chan = chan;
- client_handle->client_info.mhi_client_cb(&cb_info);
- }
+
+ result = &client_config->result;
+	if (client_config->client_info.mhi_client_cb) {
+ client_config->result.user_data =
+ client_config->user_data;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.result = result;
+ cb_info.chan = chan;
+ client_config->client_info.mhi_client_cb(&cb_info);
}
- mhi_log(MHI_MSG_RAW,
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
"Processed outbound ack chan %d Pending acks %d.\n",
chan, atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
return 0;
@@ -1002,6 +1043,7 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan, union mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len)
{
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
struct mhi_ring *local_chan_ctxt;
struct mhi_result *result;
struct mhi_cb_info cb_info;
@@ -1010,16 +1052,14 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t bb_index, ctxt_index_rp, ctxt_index_wp;
client_handle = mhi_dev_ctxt->client_handle_list[chan];
+ client_config = client_handle->client_config;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
MHI_ASSERT(!unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp), "Empty Event Ring\n");
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- result = &mhi_dev_ctxt->client_handle_list[chan]->result;
-
- parse_inbound_bb(mhi_dev_ctxt, bb_ctxt,
- &client_handle->result, xfer_len);
+ result = &client_config->result;
+ parse_inbound_bb(mhi_dev_ctxt, bb_ctxt, result, xfer_len);
if (unlikely(IS_SOFTWARE_CHANNEL(chan))) {
MHI_TX_TRB_SET_LEN(TX_TRB_LEN, local_ev_trb_loc, xfer_len);
@@ -1034,19 +1074,19 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
&ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Chan RP index %ld Chan WP index %ld chan %d\n",
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
- if (NULL != client_handle->client_info.mhi_client_cb) {
- client_handle->result.user_data =
- client_handle->user_data;
+ if (client_config->client_info.mhi_client_cb) {
+ client_config->result.user_data =
+ client_config->user_data;
cb_info.cb_reason = MHI_CB_XFER;
- cb_info.result = &client_handle->result;
+ cb_info.result = &client_config->result;
cb_info.chan = chan;
- client_handle->client_info.mhi_client_cb(&cb_info);
+ client_config->client_info.mhi_client_cb(&cb_info);
} else {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"No client registered chan %d\n", chan);
}
} else {
@@ -1067,7 +1107,7 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
&ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Chan RP index %ld Chan WP index %ld chan %d\n",
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
@@ -1088,27 +1128,32 @@ static int validate_xfer_el_addr(struct mhi_chan_ctxt *ring,
-ERANGE : 0;
}
-static void print_tre(int chan, struct mhi_ring *ring, struct mhi_tx_pkt *tre)
+static void print_tre(struct mhi_device_ctxt *mhi_dev_ctxt,
+ int chan,
+ struct mhi_ring *ring,
+ struct mhi_tx_pkt *tre)
{
uintptr_t el_index;
get_element_index(ring, tre, &el_index);
- mhi_log(MHI_MSG_ERROR, "Printing TRE 0x%p index %lx for channel %d:\n",
- tre, el_index, chan);
- mhi_log(MHI_MSG_ERROR, "Buffer Pointer 0x%llx, len 0x%x, info 0x%x\n",
- tre->buffer_ptr, tre->buf_len, tre->info);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Printing TRE 0x%p index %lx for channel %d:\n",
+ tre, el_index, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Buffer Pointer 0x%llx, len 0x%x, info 0x%x\n",
+ tre->buffer_ptr, tre->buf_len, tre->info);
}
-int parse_xfer_event(struct mhi_device_ctxt *ctxt,
+int parse_xfer_event(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *event, u32 event_id)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt;
struct mhi_result *result;
u32 chan = MHI_MAX_CHANNELS;
u16 xfer_len;
uintptr_t phy_ev_trb_loc;
union mhi_xfer_pkt *local_ev_trb_loc;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
union mhi_xfer_pkt *local_trb_loc;
struct mhi_chan_ctxt *chan_ctxt;
u32 nr_trb_to_parse;
@@ -1121,11 +1166,11 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE, event);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
- client_handle->pkt_count++;
- result = &client_handle->result;
- mhi_log(MHI_MSG_VERBOSE,
- "Event Received, chan %d, cc_code %d\n",
- chan, ev_code);
+ client_config = client_handle->client_config;
+ client_config->pkt_count++;
+ result = &client_config->result;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Event Received, chan %d, cc_code %d\n", chan, ev_code);
if (ev_code == MHI_EVENT_CC_OVERFLOW)
result->transaction_status = -EOVERFLOW;
else
@@ -1158,10 +1203,9 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_ev_trb_loc,
&nr_trb_to_parse);
if (unlikely(ret_val)) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get nr available trbs ret: %d.\n",
ret_val);
- panic("critical error");
return ret_val;
}
do {
@@ -1177,9 +1221,8 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_trb_loc);
if (!VALID_BUF(trb_data_loc, xfer_len, mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Bad buffer ptr: %lx.\n",
- (uintptr_t)trb_data_loc);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Bad buf ptr: %llx.\n", trb_data_loc);
return -EINVAL;
}
if (local_chan_ctxt->dir == MHI_IN) {
@@ -1192,7 +1235,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_dev_ctxt->counters.chan_pkts_xferd[chan]++;
if (local_trb_loc ==
(union mhi_xfer_pkt *)local_chan_ctxt->rp) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Done. Processed until: %lx.\n",
(uintptr_t)trb_data_loc);
break;
@@ -1208,7 +1251,8 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
{
u64 db_value = 0;
- mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "DB_MODE/OOB Detected chan %d.\n", chan);
local_chan_ctxt->db_mode.db_mode = 1;
if (local_chan_ctxt->wp != local_chan_ctxt->rp) {
@@ -1219,9 +1263,6 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
db_value);
}
- client_handle = mhi_dev_ctxt->client_handle_list[chan];
- if (client_handle)
- result->transaction_status = -ENOTCONN;
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1229,15 +1270,16 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING, event_id,
phy_ev_trb_loc);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Received BAD TRE event for ring %d, pointer 0x%p\n",
chan, local_ev_trb_loc);
- print_tre(chan, &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+ print_tre(mhi_dev_ctxt, chan,
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
(struct mhi_tx_pkt *)local_ev_trb_loc);
BUG();
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unknown TX completion.\n");
break;
@@ -1261,12 +1303,14 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
ret_val = ctxt_del_element(ring, &removed_element);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not remove element from ring\n");
return ret_val;
}
ret_val = ctxt_add_element(ring, &added_element);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not add element to ring\n");
return ret_val;
}
@@ -1296,7 +1340,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *ev_ctxt = NULL;
- struct mhi_client_handle *client_handle = NULL;
int pending_el = 0, i;
struct mhi_ring *bb_ctxt;
unsigned long flags;
@@ -1306,20 +1349,21 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
if (!VALID_CHAN_NR(chan)) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Bad channel number for CCE\n");
return -EINVAL;
}
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- client_handle = mhi_dev_ctxt->client_handle_list[chan];
+
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
mhi_local_event_ctxt[chan_ctxt->mhi_event_ring_index];
ev_ctxt = &mhi_dev_ctxt->
dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
- mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processed cmd reset event\n");
/* Clear all stale events related to Channel */
spin_lock_irqsave(&ev_ring->ring_lock, flags);
@@ -1357,21 +1401,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
local_chan_ctxt->rp,
local_chan_ctxt->wp,
&pending_el);
- mhi_log(MHI_MSG_INFO, "Decrementing chan %d out acks by %d.\n",
- chan, pending_el);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Decrementing chan %d out acks by %d.\n", chan, pending_el);
atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
for (i = 0; i < pending_el; i++)
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
for (i = 0; i < pending_el; i++) {
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
}
/* Reset the local channel context */
@@ -1384,10 +1425,10 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Cleaning up BB list\n");
reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
- mhi_log(MHI_MSG_INFO, "Reset complete.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
return ret_val;
}
@@ -1418,15 +1459,17 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
+ struct mhi_client_config *client_config;
int chan = 0, r = 0;
- if (!client_handle || !result || !client_handle->mhi_dev_ctxt)
+ if (!client_handle || !result)
return -EINVAL;
+ client_config = client_handle->client_config;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
@@ -1437,7 +1480,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
result->flags = pending_trb->info;
bb = bb_ctxt->ack_rp;
if (bb->bb_active) {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Bounce buffer active chan %d, copying data\n",
chan);
}
@@ -1458,7 +1501,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
r = -ENODATA;
}
mutex_unlock(&cfg->chan_lock);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
return r;
@@ -1488,11 +1531,11 @@ int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
while (mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHIREGLEN)
== 0xFFFFFFFF
&& j <= MHI_MAX_LINK_RETRIES) {
- mhi_log(MHI_MSG_CRITICAL,
- "Could not access device retry %d\n", j);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Could not access device retry %d\n", j);
msleep(MHI_LINK_STABILITY_WAIT_MS);
if (MHI_MAX_LINK_RETRIES == j) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Could not access device, FAILING!\n");
return -ETIME;
}
@@ -1503,9 +1546,12 @@ int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
int mhi_get_max_desc(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config;
+
if (!client_handle)
return -EINVAL;
- return client_handle->chan_info.max_desc - 1;
+ client_config = client_handle->client_config;
+ return client_config->chan_info.max_desc - 1;
}
EXPORT_SYMBOL(mhi_get_max_desc);
@@ -1514,6 +1560,17 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
return MHI_EPID;
}
+void mhi_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ pm_runtime_get(&mhi_dev_ctxt->pcie_device->dev);
+}
+
+void mhi_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->pcie_device->dev);
+}
+
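
A put through this wrapper refreshes the autosuspend timestamp without
forcing an immediate idle check, so hot-path get/put pairs stay cheap.
A hypothetical caller, using the new function pointers:

	mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
	/* ...touch MMIO, queue descriptors... */
	mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
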
/*
* mhi_assert_device_wake - Set WAKE_DB register
* force_set - if true, will set bit regardless of counts
@@ -1572,16 +1629,17 @@ void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ struct mhi_client_config *client_config = client_handle->client_config;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
unsigned long flags;
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
/* Disable low power mode by asserting Wake */
if (enable_lpm == false)
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
else
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
@@ -1592,21 +1650,26 @@ EXPORT_SYMBOL(mhi_set_lpm);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index)
{
- mhi_log(MHI_MSG_INFO, "Setting bus request to index %d\n", index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Setting bus request to index %d\n", index);
return msm_bus_scale_client_update_request(mhi_dev_ctxt->bus_client,
index);
}
-int mhi_deregister_channel(struct mhi_client_handle
- *client_handle) {
+int mhi_deregister_channel(struct mhi_client_handle *client_handle)
+{
int ret_val = 0;
int chan;
+ struct mhi_client_config *client_config;
- if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
+ if (!client_handle)
return -EINVAL;
- chan = client_handle->chan_info.chan_nr;
- client_handle->magic = 0;
- client_handle->mhi_dev_ctxt->client_handle_list[chan] = NULL;
+
+ client_config = client_handle->client_config;
+ chan = client_config->chan_info.chan_nr;
+ client_config->magic = 0;
+ client_config->mhi_dev_ctxt->client_handle_list[chan] = NULL;
+ kfree(client_config);
kfree(client_handle);
return ret_val;
}
@@ -1627,9 +1690,9 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
ring_ctxt = &mhi_dev_ctxt->
mhi_local_event_ctxt[chan];
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1637,10 +1700,9 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
ring_ctxt->db_mode.db_mode = 0;
} else {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
- chan,
- ring_ctxt->db_mode.brstmode,
+ chan, ring_ctxt->db_mode.brstmode,
ring_ctxt->db_mode.db_mode);
}
}
@@ -1650,10 +1712,9 @@ void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t chan,
u32 val)
{
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
-
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
}
@@ -1663,9 +1724,9 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t chan, u32 val)
{
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1678,7 +1739,7 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
chan_ctxt->db_mode.db_mode = 0;
} else {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
chan, chan_ctxt->db_mode.brstmode,
chan_ctxt->db_mode.db_mode);
@@ -1714,10 +1775,9 @@ void mhi_reg_write(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t io_offset, u32 val)
{
- mhi_log(MHI_MSG_RAW, "d.s 0x%p off: 0x%lx 0x%x\n",
- io_addr, io_offset, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "d.s 0x%p off: 0x%lx 0x%x\n", io_addr, io_offset, val);
iowrite32(val, io_addr + io_offset);
-
/* Flush write to device */
wmb();
}
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index b4447378683e..a991a2e68b34 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -29,93 +29,79 @@
int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
- u32 expiry_counter;
unsigned long flags;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned long timeout;
- mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
- read_lock_irqsave(pm_xfer_lock, flags);
- if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
- read_unlock_irqrestore(pm_xfer_lock, flags);
- return -EIO;
- }
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT);
- read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- if (pcie_word_val == 0xFFFFFFFF)
- return -ENOTCONN;
-
- while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
- expiry_counter++;
- mhi_log(MHI_MSG_ERROR,
- "Device is not RESET, sleeping and retrying.\n");
- msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for MMIO RESET bit to be cleared.\n");
+
+ timeout = jiffies +
+ msecs_to_jiffies(mhi_dev_ctxt->poll_reset_timeout_ms);
+ while (time_before(jiffies, timeout)) {
read_lock_irqsave(pm_xfer_lock, flags);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_irqrestore(pm_xfer_lock, flags);
return -EIO;
}
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHICTRL);
+ MHICTRL);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ if (pcie_word_val == 0xFFFFFFFF)
+ return -ENOTCONN;
MHI_READ_FIELD(pcie_word_val,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT);
- read_unlock_irqrestore(pm_xfer_lock, flags);
- }
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT);
- if (MHI_STATE_READY != pcie_word_val)
- return -ENOTCONN;
- return 0;
+ if (!pcie_word_val)
+ return 0;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI still in Reset sleeping\n");
+ msleep(MHI_THREAD_SLEEP_TIMEOUT_MS);
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Timeout waiting for reset to be cleared\n");
+ return -ETIMEDOUT;
}
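Beyond the context-aware logging, the rewritten loop fixes a latent bug: the old expiry_counter was compared before it was ever initialized. A self-contained sketch of the jiffies-bounded polling pattern the patch adopts (names and parameters are illustrative, not the driver's own):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/errno.h>

/* Poll until the masked bits clear, bounded by wall-clock time rather
 * than a raw retry count. Returns 0 on success, -ENOTCONN if the link
 * is dead (PCIe reads return all 1s), or -ETIMEDOUT.
 */
static int poll_bits_clear(void __iomem *addr, u32 mask,
			   unsigned int timeout_ms, unsigned int sleep_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	while (time_before(jiffies, timeout)) {
		u32 val = readl_relaxed(addr);

		if (val == 0xFFFFFFFF)
			return -ENOTCONN;
		if (!(val & mask))
			return 0;
		msleep(sleep_ms);
	}
	return -ETIMEDOUT;
}

time_before() handles jiffies wraparound, which is why it is preferred over comparing jiffies values directly.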
int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
- u32 expiry_counter;
unsigned long flags;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned long timeout;
- mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for MMIO Ready bit to be set\n");
- read_lock_irqsave(pm_xfer_lock, flags);
- if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
- read_unlock_irqrestore(pm_xfer_lock, flags);
- return -EIO;
- }
- /* Read MMIO and poll for READY bit to be set */
- pcie_word_val = mhi_reg_read(
- mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHISTATUS_READY_MASK,
- MHISTATUS_READY_SHIFT);
- read_unlock_irqrestore(pm_xfer_lock, flags);
-
- if (pcie_word_val == 0xFFFFFFFF)
- return -ENOTCONN;
- expiry_counter = 0;
- while (MHI_STATE_READY != pcie_word_val && expiry_counter < 50) {
- expiry_counter++;
- mhi_log(MHI_MSG_ERROR,
- "Device is not ready, sleeping and retrying.\n");
- msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ timeout = jiffies +
+ msecs_to_jiffies(mhi_dev_ctxt->poll_reset_timeout_ms);
+ while (time_before(jiffies, timeout)) {
+ /* Read MMIO and poll for READY bit to be set */
read_lock_irqsave(pm_xfer_lock, flags);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_irqrestore(pm_xfer_lock, flags);
return -EIO;
}
+
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
read_unlock_irqrestore(pm_xfer_lock, flags);
+ if (pcie_word_val == 0xFFFFFFFF)
+ return -ENOTCONN;
+ MHI_READ_FIELD(pcie_word_val,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT);
+ if (pcie_word_val == MHI_STATE_READY)
+ return 0;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device is not ready, sleeping and retrying.\n");
+ msleep(MHI_THREAD_SLEEP_TIMEOUT_MS);
}
-
- if (pcie_word_val != MHI_STATE_READY)
- return -ETIMEDOUT;
- return 0;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Device timed out waiting for ready\n");
+ return -ETIMEDOUT;
}
int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -125,28 +111,26 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i = 0;
int ret_val;
- mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
- mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;
-
- mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
- mhi_dev_ctxt->mmio_info.mmio_addr);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "~~~ Initializing MMIO ~~~\n");
+ mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->core.bar0_base;
mhi_dev_ctxt->mmio_info.mmio_len = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr,
MHIREGLEN);
if (0 == mhi_dev_ctxt->mmio_info.mmio_len) {
- mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Received mmio length as zero\n");
return -EIO;
}
- mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
- mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Testing MHI Ver\n");
+ mhi_dev_ctxt->core.mhi_ver = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
- if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
- mhi_log(MHI_MSG_CRITICAL,
- "Bad MMIO version, 0x%x\n",
- mhi_dev_ctxt->dev_props->mhi_ver);
+ if (mhi_dev_ctxt->core.mhi_ver != MHI_VERSION) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Bad MMIO version, 0x%x\n", mhi_dev_ctxt->core.mhi_ver);
- return ret_val;
+ return -EIO;
}
@@ -159,9 +143,10 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
else
chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
}
- mhi_log(MHI_MSG_INFO,
- "Read back MMIO Ready bit successfully. Moving on..\n");
- mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Read back MMIO Ready bit successfully. Moving on..\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reading channel doorbell offset\n");
mhi_dev_ctxt->mmio_info.chan_db_addr =
mhi_dev_ctxt->mmio_info.mmio_addr;
@@ -173,13 +158,15 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
CHDBOFF, CHDBOFF_CHDBOFF_MASK,
CHDBOFF_CHDBOFF_SHIFT);
- mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reading event doorbell offset\n");
mhi_dev_ctxt->mmio_info.event_db_addr += mhi_reg_read_field(
mhi_dev_ctxt->mmio_info.mmio_addr,
ERDBOFF, ERDBOFF_ERDBOFF_MASK,
ERDBOFF_ERDBOFF_SHIFT);
- mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Setting all MMIO values.\n");
mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
MHICFG,
@@ -290,7 +277,7 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
pcie_word_val);
- mhi_log(MHI_MSG_INFO, "Done..\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done..\n");
return 0;
}
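mhi_init_mmio() manipulates individual register fields through MHI_READ_FIELD() and the mhi_reg_*_field() helpers. Their definitions live in mhi_macros.h and are not part of this hunk; a plausible reconstruction, for orientation only (the real macros may differ in detail):

/* Hypothetical reconstruction of the field helpers used above. */
#define MHI_READ_FIELD(_var, _mask, _shift) \
	((_var) = ((_var) & (_mask)) >> (_shift))

#define MHI_WRITE_FIELD(_var, _mask, _shift, _val) \
	((_var) = ((_var) & ~(_mask)) | (((_val) << (_shift)) & (_mask)))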
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index 2f44601e225e..b88f90759f8a 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -23,11 +23,13 @@
/* Write only sysfs attributes */
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
+static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
/* Read only sysfs attributes */
static struct attribute *mhi_attributes[] = {
&dev_attr_MHI_M0.attr,
+ &dev_attr_MHI_M3.attr,
NULL,
};
@@ -38,9 +40,9 @@ static struct attribute_group mhi_attribute_group = {
int mhi_pci_suspend(struct device *dev)
{
int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
- mhi_log(MHI_MSG_INFO, "Entered\n");
-
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev)) {
r = mhi_runtime_suspend(dev);
@@ -51,65 +53,67 @@ int mhi_pci_suspend(struct device *dev)
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
- mhi_log(MHI_MSG_INFO, "Exit\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit\n");
return r;
}
int mhi_runtime_suspend(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
mutex_lock(&mhi_dev_ctxt->pm_lock);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with State:0x%x %s\n",
mhi_dev_ctxt->mhi_pm_state,
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
/* Link is already disabled */
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
- mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Already in active state, exiting\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return 0;
}
if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
- mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Busy, Aborting Runtime Suspend\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return -EBUSY;
}
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (!r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get M0||M1 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
r = -EIO;
goto rpm_suspend_exit;
}
- mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Allowing M3 State\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
- "Waiting for M3 completion.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
if (!r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get M3 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
r = -EIO;
@@ -118,19 +122,23 @@ int mhi_runtime_suspend(struct device *dev)
r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to Turn off link ret:%d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to Turn off link ret:%d\n",
+ r);
}
rpm_suspend_exit:
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
int mhi_runtime_idle(struct device *dev)
{
- mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered, returning -EBUSY\n");
/*
* RPM framework during runtime resume always calls
@@ -150,7 +158,7 @@ int mhi_runtime_idle(struct device *dev)
int mhi_runtime_resume(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
mutex_lock(&mhi_dev_ctxt->pm_lock);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
@@ -160,7 +168,7 @@ int mhi_runtime_resume(struct device *dev)
/* turn on link */
r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to resume link\n");
goto rpm_resume_exit;
}
@@ -178,7 +186,7 @@ int mhi_runtime_resume(struct device *dev)
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (!r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M0 event, timeout\n");
r = -EIO;
goto rpm_resume_exit;
@@ -187,17 +195,18 @@ int mhi_runtime_resume(struct device *dev)
rpm_resume_exit:
mutex_unlock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited with :%d\n", r);
return r;
}
int mhi_pci_resume(struct device *dev)
{
int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
r = mhi_runtime_resume(dev);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
@@ -220,12 +229,29 @@ void mhi_rem_pm_sysfs(struct device *dev)
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ pm_runtime_get(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->pcie_device->dev);
+
+ return count;
+}
+
+ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ if (atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Schedule RPM suspend\n");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
+ }
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return count;
}
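Both attributes drive runtime PM without pinning the device. The M0 path uses a get/put_noidle pair: the get forces an (asynchronous) resume and takes a reference, the put drops the reference without scheduling an idle check, and mark_last_busy() re-arms the autosuspend timer. A sketch of the idiom in isolation:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Wake the device and hand control back to autosuspend, leaving the
 * usage count where it started.
 */
static void nudge_to_active(struct device *dev)
{
	pm_runtime_get(dev);		/* resume, usage count +1 */
	pm_runtime_mark_last_busy(dev);	/* restart autosuspend timer */
	pm_runtime_put_noidle(dev);	/* usage count -1, no idle check */
}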
@@ -234,45 +260,45 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
struct pci_dev *pcie_dev;
int r = 0;
- mhi_log(MHI_MSG_INFO, "Entered...\n");
- pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered...\n");
+ pcie_dev = mhi_dev_ctxt->pcie_device;
if (0 == mhi_dev_ctxt->flags.link_up) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Link already marked as down, nothing to do\n");
goto exit;
}
r = pci_save_state(pcie_dev);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to save pcie state ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to save pcie state ret: %d\n", r);
}
- mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+ mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
pci_disable_device(pcie_dev);
r = pci_set_power_state(pcie_dev, PCI_D3hot);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to set pcie power state to D3 hot ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set pcie power state to D3hot ret:%d\n", r);
}
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
pcie_dev->bus->number,
pcie_dev,
- NULL,
- 0);
+ NULL,
+ 0);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Failed to suspend pcie bus ret 0x%x\n", r);
r = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (r)
- mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set bus freq ret %d\n", r);
mhi_dev_ctxt->flags.link_up = 0;
exit:
- mhi_log(MHI_MSG_INFO, "Exited...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited...\n");
+
return 0;
}
@@ -281,17 +307,16 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
int r = 0;
struct pci_dev *pcie_dev;
- pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
+ pcie_dev = mhi_dev_ctxt->pcie_device;
- mhi_log(MHI_MSG_INFO, "Entered...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered...\n");
if (mhi_dev_ctxt->flags.link_up)
goto exit;
r = mhi_set_bus_request(mhi_dev_ctxt, 1);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Could not set bus frequency ret: %d\n", r);
r = msm_pcie_pm_control(MSM_PCIE_RESUME,
pcie_dev->bus->number,
@@ -299,24 +324,23 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
NULL,
0);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume pcie bus ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to resume pcie bus ret %d\n", r);
goto exit;
}
r = pci_enable_device(pcie_dev);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to enable device ret:%d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to enable device ret:%d\n", r);
pci_load_and_free_saved_state(pcie_dev,
- &mhi_dev_ctxt->dev_props->pcie_state);
+ &mhi_dev_ctxt->core.pcie_state);
pci_restore_state(pcie_dev);
pci_set_master(pcie_dev);
mhi_dev_ctxt->flags.link_up = 1;
exit:
- mhi_log(MHI_MSG_INFO, "Exited...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited...\n");
return r;
}
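mhi_turn_off_pcie_link() and mhi_turn_on_pcie_link() bracket D3hot with the PCI saved-state dance: snapshot config space, detach the snapshot into the driver context (core.pcie_state here), and replay it once the link is back. Condensed into one place, assuming a simplified context struct:

#include <linux/pci.h>

struct link_ctx {
	struct pci_saved_state *saved;
};

static void link_down(struct pci_dev *pdev, struct link_ctx *ctx)
{
	pci_save_state(pdev);			  /* snapshot config space */
	ctx->saved = pci_store_saved_state(pdev); /* detach the snapshot */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

static int link_up(struct pci_dev *pdev, struct link_ctx *ctx)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;
	pci_load_and_free_saved_state(pdev, &ctx->saved);
	pci_restore_state(pdev);		  /* replay config space */
	pci_set_master(pdev);
	return 0;
}

Detaching the snapshot with pci_store_saved_state() protects it from being overwritten inside the struct pci_dev between suspend and resume.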
diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c
index 07d0098a1b61..e15055f7db9c 100644
--- a/drivers/platform/msm/mhi/mhi_ring_ops.c
+++ b/drivers/platform/msm/mhi/mhi_ring_ops.c
@@ -21,7 +21,6 @@ static int add_element(struct mhi_ring *ring, void **rp,
if (NULL == ring || 0 == ring->el_size
|| NULL == ring->base || 0 == ring->len) {
- mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return -EINVAL;
}
@@ -39,8 +38,6 @@ static int add_element(struct mhi_ring *ring, void **rp,
if (ring->overwrite_en) {
ctxt_del_element(ring, NULL);
} else {
- mhi_log(MHI_MSG_INFO, "Ring 0x%lX is full\n",
- (uintptr_t)ring->base);
return -ENOSPC;
}
}
@@ -92,8 +89,6 @@ int delete_element(struct mhi_ring *ring, void **rp,
if (r)
return r;
if (d_wp == d_rp) {
- mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lx is empty\n",
- (uintptr_t)ring->base);
if (NULL != assigned_addr)
*assigned_addr = NULL;
return -ENODATA;
@@ -113,23 +108,26 @@ int delete_element(struct mhi_ring *ring, void **rp,
int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
+ struct mhi_client_config *client_config;
struct mhi_device_ctxt *ctxt;
int bb_ring, ch_ring;
- if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
- !client_handle->mhi_dev_ctxt)
+ if (!client_handle)
return -EINVAL;
- ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ client_config = client_handle->client_config;
+ ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
- bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
- ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+ bb_ring = get_nr_avail_ring_elements(ctxt, &ctxt->chan_bb_list[chan]);
+ ch_ring = get_nr_avail_ring_elements(ctxt,
+ &ctxt->mhi_local_chan_ctxt[chan]);
return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
-int get_nr_avail_ring_elements(struct mhi_ring *ring)
+int get_nr_avail_ring_elements(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *ring)
{
u32 nr_el = 0;
uintptr_t ring_size = 0;
@@ -138,7 +136,7 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring)
ring_size = ring->len / ring->el_size;
ret_val = get_nr_enclosed_el(ring, ring->rp, ring->wp, &nr_el);
if (ret_val != 0) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get enclosed el ret %d.\n", ret_val);
return 0;
}
@@ -155,19 +153,14 @@ int get_nr_enclosed_el(struct mhi_ring *ring, void *rp,
if (NULL == ring || 0 == ring->el_size ||
NULL == ring->base || 0 == ring->len) {
- mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return -EINVAL;
}
r = get_element_index(ring, rp, &index_rp);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL, "Bad element index rp 0x%p.\n", rp);
+ if (r)
return r;
- }
r = get_element_index(ring, wp, &index_wp);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL, "Bad element index wp 0x%p.\n", wp);
+ if (r)
return r;
- }
ring_size = ring->len / ring->el_size;
if (index_rp < index_wp)
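get_nr_enclosed_el() converts both ring pointers to element indices and counts the elements from the read index up to the write index, wrapping at the ring boundary. The arithmetic in isolation (illustrative; the driver's version also validates the ring and its inputs, as above):

#include <linux/types.h>

/* Elements from index_rp up to (not including) index_wp on a circular
 * ring of ring_size elements; rp == wp means empty.
 */
static u32 nr_enclosed(u32 index_rp, u32 index_wp, u32 ring_size)
{
	if (index_rp <= index_wp)
		return index_wp - index_rp;
	return ring_size - index_rp + index_wp;
}

mhi_get_free_desc() then takes the min of the free space on the bounce-buffer ring and the transfer ring, since queueing a packet needs a slot on both.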
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index defd6f4fd137..5cb0d1b76fb6 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -24,40 +24,35 @@
static int mhi_ssr_notify_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
-
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ container_of(nb, struct mhi_device_ctxt, mhi_ssr_nb);
switch (action) {
case SUBSYS_BEFORE_POWERUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event BEFORE_POWERUP\n");
break;
case SUBSYS_AFTER_POWERUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event AFTER_POWERUP\n");
break;
case SUBSYS_POWERUP_FAILURE:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event POWERUP_FAILURE\n");
break;
case SUBSYS_BEFORE_SHUTDOWN:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event BEFORE_SHUTDOWN\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
case SUBSYS_AFTER_SHUTDOWN:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event AFTER_SHUTDOWN\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
case SUBSYS_RAMDUMP_NOTIFICATION:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event RAMDUMP\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
default:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received ESOC notifcation %d, NOT handling\n",
(int)action);
break;
@@ -65,36 +60,30 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
return NOTIFY_OK;
}
-static struct notifier_block mhi_ssr_nb = {
- .notifier_call = mhi_ssr_notify_cb,
-};
-
int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
struct device_node *np;
- struct pci_driver *mhi_driver;
- struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev;
+ struct device *dev = &mhi_dev_ctxt->pcie_device->dev;
- mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver;
np = dev->of_node;
mhi_dev_ctxt->esoc_handle = devm_register_esoc_client(dev, "mdm");
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Of table of pcie struct device property is dev->of_node %p\n",
np);
if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_handle)) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to register for SSR, ret %lx\n",
(uintptr_t)mhi_dev_ctxt->esoc_handle);
return -EIO;
}
-
+ mhi_dev_ctxt->mhi_ssr_nb.notifier_call = mhi_ssr_notify_cb;
mhi_dev_ctxt->esoc_ssr_handle = subsys_notif_register_notifier(
mhi_dev_ctxt->esoc_handle->name,
- &mhi_ssr_nb);
+ &mhi_dev_ctxt->mhi_ssr_nb);
if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_ssr_handle)) {
ret_val = PTR_RET(mhi_dev_ctxt->esoc_ssr_handle);
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Can't find esoc desc ret 0x%lx\n",
(uintptr_t)mhi_dev_ctxt->esoc_ssr_handle);
}
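Moving the notifier_block from file scope into struct mhi_device_ctxt is what lets mhi_ssr_notify_cb() recover its context with container_of(), so multiple device instances can register independently. The pattern in miniature:

#include <linux/kernel.h>
#include <linux/notifier.h>

/* Per-instance notifier: the block lives inside the context, so the
 * callback needs no globals to find its state.
 */
struct demo_ctxt {
	struct notifier_block nb;
	int id;
};

static int demo_notify_cb(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct demo_ctxt *ctxt = container_of(nb, struct demo_ctxt, nb);

	pr_info("instance %d: event %lu\n", ctxt->id, action);
	return NOTIFY_OK;
}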
@@ -107,18 +96,25 @@ void mhi_notify_client(struct mhi_client_handle *client_handle,
{
struct mhi_cb_info cb_info = {0};
struct mhi_result result = {0};
+ struct mhi_client_config *client_config;
cb_info.result = NULL;
cb_info.cb_reason = reason;
- if (NULL != client_handle &&
- NULL != client_handle->client_info.mhi_client_cb) {
- result.user_data = client_handle->user_data;
- cb_info.chan = client_handle->chan_info.chan_nr;
+ if (client_handle == NULL)
+ return;
+
+ client_config = client_handle->client_config;
+
+ if (client_config->client_info.mhi_client_cb) {
+ result.user_data = client_config->user_data;
+ cb_info.chan = client_config->chan_info.chan_nr;
cb_info.result = &result;
- mhi_log(MHI_MSG_INFO, "Calling back for chan %d, reason %d\n",
- cb_info.chan, reason);
- client_handle->client_info.mhi_client_cb(&cb_info);
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "Calling back for chan %d, reason %d\n",
+ cb_info.chan,
+ reason);
+ client_config->client_info.mhi_client_cb(&cb_info);
}
}
@@ -136,16 +132,15 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
}
}
-int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->bhi_ctxt.bhi_base = mhi_pcie_dev->core.bar0_base;
- pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base, BHIOFF);
- mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
- pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
+ mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+ mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
BHI_EXECENV);
mhi_dev_ctxt->dev_exec_env = pcie_word_val;
if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
@@ -153,55 +148,50 @@ int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
} else {
- mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid EXEC_ENV: 0x%x\n",
pcie_word_val);
r = -EIO;
}
- mhi_log(MHI_MSG_INFO, "EXEC_ENV: %d Base state %d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "EXEC_ENV: %d Base state %d\n",
pcie_word_val, mhi_dev_ctxt->base_state);
return r;
}
void mhi_link_state_cb(struct msm_pcie_notify *notify)
{
-
- struct mhi_pcie_dev_info *mhi_pcie_dev;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- if (NULL == notify || NULL == notify->data) {
- mhi_log(MHI_MSG_CRITICAL,
- "Incomplete handle received\n");
+ if (!notify || !notify->data) {
+ pr_err("%s: incomplete handle received\n", __func__);
return;
}
- mhi_pcie_dev = notify->data;
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
+ mhi_dev_ctxt = notify->data;
switch (notify->event) {
case MSM_PCIE_EVENT_LINKDOWN:
- mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKDOWN\n");
break;
case MSM_PCIE_EVENT_LINKUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_LINKUP\n");
- mhi_pcie_dev->link_up_cntr++;
+ mhi_dev_ctxt->counters.link_up_cntr++;
break;
case MSM_PCIE_EVENT_WAKEUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_WAKE\n");
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
if (mhi_dev_ctxt->flags.mhi_initialized) {
- pm_runtime_get(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
}
break;
default:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received bad link event\n");
return;
}
@@ -213,9 +203,9 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to start state change event, to %d\n",
- mhi_dev_ctxt->base_state);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to start state change event, to %d\n",
+ mhi_dev_ctxt->base_state);
}
return r;
}
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index 1021a56d1b3d..5c1386f6d30c 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -94,7 +94,7 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Ringing chan dbs\n");
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
@@ -115,7 +115,7 @@ static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u64 rp = 0;
struct mhi_ring *local_ctxt = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Ringing chan dbs\n");
local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
@@ -163,7 +163,8 @@ static int process_m0_transition(
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered With State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
switch (mhi_dev_ctxt->mhi_state) {
@@ -182,7 +183,7 @@ static int process_m0_transition(
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_assert_device_wake(mhi_dev_ctxt, true);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
@@ -190,10 +191,11 @@ static int process_m0_transition(
ring_all_cmd_dbs(mhi_dev_ctxt);
}
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+
return 0;
}
@@ -207,7 +209,7 @@ void process_m1_transition(struct work_struct *work)
mutex_lock(&mhi_dev_ctxt->pm_lock);
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing M1 state transition from state %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -218,7 +220,8 @@ void process_m1_transition(struct work_struct *work)
return;
}
- mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Transitioning to M2 Transition\n");
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
mhi_dev_ctxt->counters.m1_m2++;
mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
@@ -230,24 +233,24 @@ void process_m1_transition(struct work_struct *work)
/* During DEBOUNCE Time We could be receiving M0 Event */
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
- mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered M2 State\n");
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
}
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
- mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Exiting M2 Immediately, count:%d\n",
atomic_read(&mhi_dev_ctxt->counters.device_wake));
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_assert_device_wake(mhi_dev_ctxt, true);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- } else {
- mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_request_autosuspend(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ } else if (mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Schedule RPM suspend");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
}
mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
@@ -257,7 +260,7 @@ static int process_m3_transition(
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -284,7 +287,7 @@ static int process_link_down_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -294,23 +297,22 @@ static int process_wake_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
-
}
static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -320,11 +322,12 @@ static int process_ready_transition(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing READY state transition\n");
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to reset thread queues\n");
return r;
}
@@ -335,7 +338,7 @@ static int process_ready_transition(
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failure during MMIO initialization\n");
return r;
}
@@ -344,7 +347,7 @@ static int process_ready_transition(
cur_work_item);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failure during event ring init\n");
return r;
}
@@ -379,7 +382,8 @@ static int process_reset_transition(
enum STATE_TRANSITION cur_work_item)
{
int r = 0, i = 0;
- mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing RESET state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -387,11 +391,12 @@ static int process_reset_transition(
mhi_dev_ctxt->counters.mhi_reset_cntr++;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
if (r)
- mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device not RESET ret %d\n", r);
r = mhi_test_for_device_ready(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "timed out waiting for ready ret:%d\n", r);
return r;
}
@@ -417,7 +422,7 @@ static int process_reset_transition(
r = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_READY);
if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to initiate %s state trans\n",
state_transition_str(STATE_TRANSITION_READY));
return r;
@@ -427,7 +432,7 @@ static int process_syserr_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -443,7 +448,8 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
cb_info.cb_reason = MHI_CB_MHI_ENABLED;
- mhi_log(MHI_MSG_INFO, "Enabling Clients, exec env %d.\n", exec_env);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enabling Clients, exec env %d.\n", exec_env);
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
if (!VALID_CHAN_NR(i))
continue;
@@ -455,14 +461,15 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
}
- mhi_log(MHI_MSG_INFO, "Done.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done.\n");
}
static int process_sbl_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Enabled\n");
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enabled\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -476,7 +483,8 @@ static int process_amss_transition(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing AMSS state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -486,19 +494,20 @@ static int process_amss_transition(
cur_work_item);
mhi_dev_ctxt->flags.mhi_initialized = 1;
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to set local chan state ret %d\n", r);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
return r;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_chan_dbs(mhi_dev_ctxt, true);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
} else {
- mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI is initialized\n");
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
@@ -510,17 +519,20 @@ static int process_amss_transition(
* incremented by pci fw pci_pm_init() or by
* mhi shutdown/ssr apis.
*/
- mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
+ if (mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Allow runtime suspend\n");
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_allow(&mhi_dev_ctxt->pcie_device->dev);
+ }
/* During probe we incremented, releasing that count */
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -530,7 +542,8 @@ static int process_stt_work_item(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Transitioning to %s\n",
state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
@@ -567,7 +580,7 @@ static int process_stt_work_item(
r = process_wake_transition(mhi_dev_ctxt, cur_work_item);
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Unrecongized state: %s\n",
state_transition_str(cur_work_item));
break;
@@ -585,23 +598,19 @@ int mhi_state_change_thread(void *ctxt)
&mhi_dev_ctxt->state_change_work_item_list;
struct mhi_ring *state_change_q = &work_q->q_info;
- if (NULL == mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Got bad context, quitting\n");
- return -EIO;
- }
for (;;) {
r = wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.state_change_event,
((work_q->q_info.rp != work_q->q_info.wp) &&
!mhi_dev_ctxt->flags.st_thread_stopped));
if (r) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught signal %d, quitting\n", r);
return 0;
}
if (mhi_dev_ctxt->flags.kill_threads) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Caught exit signal, quitting\n");
return 0;
}
@@ -638,10 +647,11 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->state_change_work_item_list;
spin_lock_irqsave(work_q->q_lock, flags);
- nr_avail_work_items = get_nr_avail_ring_elements(stt_ring);
+ nr_avail_work_items =
+ get_nr_avail_ring_elements(mhi_dev_ctxt, stt_ring);
BUG_ON(nr_avail_work_items <= 0);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Processing state transition %s\n",
state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index c5c025b8585a..3389de2f95b3 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -51,13 +51,13 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
{
int amnt_copied = 0;
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
uintptr_t v_wp_index;
uintptr_t v_rp_index;
int valid_chan = 0;
struct mhi_chan_ctxt *cc_list;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
int pkts_queued;
if (NULL == mhi_dev_ctxt)
@@ -76,6 +76,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
continue;
}
client_handle = mhi_dev_ctxt->client_handle_list[*offp];
+ client_config = client_handle->client_config;
valid_chan = 1;
}
@@ -87,8 +88,9 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
&v_wp_index);
- pkts_queued = client_handle->chan_info.max_desc -
- get_nr_avail_ring_elements(&mhi_dev_ctxt->
+ pkts_queued = client_config->chan_info.max_desc -
+ get_nr_avail_ring_elements(mhi_dev_ctxt,
+ &mhi_dev_ctxt->
mhi_local_chan_ctxt[*offp]) - 1;
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
@@ -115,7 +117,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
"pkts_queued",
pkts_queued,
"/",
- client_handle->chan_info.max_desc,
+ client_config->chan_info.max_desc,
"bb_used:",
mhi_dev_ctxt->counters.bb_used[*offp]);
@@ -128,9 +130,16 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
return -ENOMEM;
}
+int mhi_dbgfs_open(struct inode *inode, struct file *fp)
+{
+ fp->private_data = inode->i_private;
+ return 0;
+}
+
static const struct file_operations mhi_dbgfs_chan_fops = {
.read = mhi_dbgfs_chan_read,
.write = NULL,
+ .open = mhi_dbgfs_open,
};
static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
@@ -143,8 +152,7 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
uintptr_t v_rp_index;
uintptr_t device_p_rp_index;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
if (NULL == mhi_dev_ctxt)
return -EIO;
*offp = (u32)(*offp) % mhi_dev_ctxt->mmio_info.nr_event_rings;
@@ -209,31 +217,15 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
static const struct file_operations mhi_dbgfs_ev_fops = {
.read = mhi_dbgfs_ev_read,
.write = NULL,
-};
-
-static ssize_t mhi_dbgfs_trigger_msi(struct file *fp, const char __user *buf,
- size_t count, loff_t *offp)
-{
- u32 msi_nr = 0;
- void *irq_ctxt = &((mhi_devices.device_list[0]).pcie_device->dev);
-
- if (copy_from_user(&msi_nr, buf, sizeof(msi_nr)))
- return -ENOMEM;
- mhi_msi_handlr(msi_nr, irq_ctxt);
- return 0;
-}
-
-static const struct file_operations mhi_dbgfs_trigger_msi_fops = {
- .read = NULL,
- .write = mhi_dbgfs_trigger_msi,
+ .open = mhi_dbgfs_open,
};
static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
int amnt_copied = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
+
if (NULL == mhi_dev_ctxt)
return -EIO;
msleep(100);
@@ -260,7 +252,7 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
"device_wake:",
atomic_read(&mhi_dev_ctxt->counters.device_wake),
"usage_count:",
- atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+ atomic_read(&mhi_dev_ctxt->pcie_device->dev.
power.usage_count),
"outbound_acks:",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
@@ -275,63 +267,61 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
static const struct file_operations mhi_dbgfs_state_fops = {
.read = mhi_dbgfs_state_read,
.write = NULL,
+ .open = mhi_dbgfs_open,
};
int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
struct dentry *mhi_chan_stats;
struct dentry *mhi_state_stats;
- struct dentry *mhi_msi_trigger;
struct dentry *mhi_ev_stats;
-
- mhi_dev_ctxt->mhi_parent_folder =
- debugfs_create_dir("mhi", NULL);
- if (mhi_dev_ctxt->mhi_parent_folder == NULL) {
- mhi_log(MHI_MSG_INFO, "Failed to create debugfs parent dir.\n");
+ const struct pcie_core_info *core = &mhi_dev_ctxt->core;
+ char node_name[32];
+
+ snprintf(node_name, sizeof(node_name), "%04x_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+
+ mhi_dev_ctxt->child =
+ debugfs_create_dir(node_name, mhi_dev_ctxt->parent);
+ if (mhi_dev_ctxt->child == NULL) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to create debugfs parent dir.\n");
return -EIO;
}
mhi_chan_stats = debugfs_create_file("mhi_chan_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_chan_fops);
if (mhi_chan_stats == NULL)
return -ENOMEM;
mhi_ev_stats = debugfs_create_file("mhi_ev_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_ev_fops);
if (mhi_ev_stats == NULL)
goto clean_chan;
mhi_state_stats = debugfs_create_file("mhi_state_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_state_fops);
if (mhi_state_stats == NULL)
goto clean_ev_stats;
- mhi_msi_trigger = debugfs_create_file("mhi_msi_trigger",
- 0444,
- mhi_dev_ctxt->mhi_parent_folder,
- mhi_dev_ctxt,
- &mhi_dbgfs_trigger_msi_fops);
- if (mhi_msi_trigger == NULL)
- goto clean_state;
mhi_dev_ctxt->chan_info = kmalloc(MHI_LOG_SIZE, GFP_KERNEL);
if (mhi_dev_ctxt->chan_info == NULL)
- goto clean_all;
+ goto clean_state;
return 0;
-clean_all:
- debugfs_remove(mhi_msi_trigger);
+
clean_state:
 debugfs_remove(mhi_state_stats);
clean_ev_stats:
debugfs_remove(mhi_ev_stats);
clean_chan:
debugfs_remove(mhi_chan_stats);
- debugfs_remove(mhi_dev_ctxt->mhi_parent_folder);
+ debugfs_remove(mhi_dev_ctxt->child);
return -ENOMEM;
}
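The new open hook exists only to copy inode->i_private (the per-device context passed to debugfs_create_file()) into file->private_data for the read handlers. The kernel's stock simple_open() from fs/libfs.c does exactly this, so equivalent wiring without the custom hook would be:

#include <linux/fs.h>

/* simple_open() already copies inode->i_private into
 * file->private_data, making a hand-rolled open hook unnecessary.
 */
static const struct file_operations mhi_dbgfs_chan_fops_alt = {
	.read = mhi_dbgfs_chan_read,
	.open = simple_open,
};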
diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h
index a948a2354de7..712647dc9f7c 100644
--- a/drivers/platform/msm/mhi/mhi_sys.h
+++ b/drivers/platform/msm/mhi/mhi_sys.h
@@ -38,12 +38,13 @@ extern void *mhi_ipc_log;
} \
} while (0)
-#define mhi_log(_msg_lvl, _msg, ...) do { \
+#define mhi_log(mhi_dev_ctxt, _msg_lvl, _msg, ...) do { \
if ((_msg_lvl) >= mhi_msg_lvl) \
pr_alert("[%s] " _msg, __func__, ##__VA_ARGS__);\
- if (mhi_ipc_log && ((_msg_lvl) >= mhi_ipc_log_lvl)) \
- ipc_log_string(mhi_ipc_log, \
- "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ if (mhi_dev_ctxt->mhi_ipc_log && \
+ ((_msg_lvl) >= mhi_ipc_log_lvl)) \
+ ipc_log_string(mhi_dev_ctxt->mhi_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
extern const char * const mhi_states_str[MHI_STATE_LIMIT];
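With the context argument, mhi_log() now targets a per-device IPC log buffer instead of the old driver-wide mhi_ipc_log. The probe-time setup is not shown in this section; a sketch of what it would look like, assuming the MSM ipc_logging API (whose signature appears in mhi_uci.c below) and reusing the BDF-derived naming from mhi_init_debugfs() (MHI_IPC_LOG_PAGES is an assumed constant):

/* Assumed probe-time setup for the per-device log consumed by the
 * reworked mhi_log() macro.
 */
mhi_dev_ctxt->mhi_ipc_log =
	ipc_log_context_create(MHI_IPC_LOG_PAGES, node_name, 0);
if (!mhi_dev_ctxt->mhi_ipc_log)
	pr_err("%s: failed to create IPC log context\n", __func__);

Note that the macro dereferences mhi_dev_ctxt unconditionally, which is why callers that can run before a context exists (such as mhi_link_state_cb() above) fall back to pr_err().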
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 96c4671f994f..0e28ebdd8fea 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,8 +31,6 @@
#define MHI_DEV_NODE_NAME_LEN 13
#define MHI_SOFTWARE_CLIENT_LIMIT 23
-#define TRE_TYPICAL_SIZE 0x1000
-#define TRE_MAX_SIZE 0xFFFF
#define MHI_UCI_IPC_LOG_PAGES (25)
#define MAX_NR_TRBS_PER_CHAN 10
@@ -129,9 +127,8 @@ struct uci_client {
struct mhi_uci_ctxt_t {
struct list_head node;
- struct platform_dev *pdev;
+ struct platform_device *pdev;
struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
- struct mhi_client_info_t client_info;
dev_t dev_t;
struct mutex ctrl_mutex;
struct cdev cdev[MHI_SOFTWARE_CLIENT_LIMIT];
@@ -332,8 +329,8 @@ static int mhi_uci_send_packet(struct mhi_client_handle **client_handle,
return 0;
for (i = 0; i < nr_avail_trbs; ++i) {
- data_to_insert_now = min(data_left_to_insert,
- TRE_MAX_SIZE);
+ data_to_insert_now = min_t(size_t, data_left_to_insert,
+ uci_handle->out_attr.max_packet_size);
if (is_uspace_buf) {
data_loc = kmalloc(data_to_insert_now, GFP_KERNEL);
if (NULL == data_loc) {
@@ -1172,6 +1169,9 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
uci_handle = cb_info->result->user_data;
switch (cb_info->cb_reason) {
case MHI_CB_MHI_ENABLED:
+ uci_log(uci_handle->uci_ipc_log,
+ UCI_DBG_INFO,
+ "MHI enabled CB received.\n");
atomic_set(&uci_handle->mhi_disabled, 0);
break;
case MHI_CB_MHI_DISABLED:
@@ -1202,9 +1202,11 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
}
}
-static int mhi_register_client(struct uci_client *mhi_client)
+static int mhi_register_client(struct uci_client *mhi_client,
+ struct device *dev)
{
int ret_val = 0;
+ struct mhi_client_info_t client_info = {0};
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_INFO,
@@ -1222,11 +1224,13 @@ static int mhi_register_client(struct uci_client *mhi_client)
UCI_DBG_INFO,
"Registering chan %d\n",
mhi_client->out_chan);
- ret_val = mhi_register_channel(&mhi_client->out_handle,
- mhi_client->out_chan,
- 0,
- &mhi_client->uci_ctxt->client_info,
- mhi_client);
+ client_info.dev = dev;
+ client_info.node_name = "qcom,mhi";
+ client_info.user_data = mhi_client;
+ client_info.mhi_client_cb = uci_xfer_cb;
+ client_info.chan = mhi_client->out_chan;
+ client_info.max_payload = mhi_client->out_attr.max_packet_size;
+ ret_val = mhi_register_channel(&mhi_client->out_handle, &client_info);
if (0 != ret_val)
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_ERROR,
@@ -1238,11 +1242,9 @@ static int mhi_register_client(struct uci_client *mhi_client)
UCI_DBG_INFO,
"Registering chan %d\n",
mhi_client->in_chan);
- ret_val = mhi_register_channel(&mhi_client->in_handle,
- mhi_client->in_chan,
- 0,
- &mhi_client->uci_ctxt->client_info,
- mhi_client);
+ client_info.max_payload = mhi_client->in_attr.max_packet_size;
+ client_info.chan = mhi_client->in_chan;
+ ret_val = mhi_register_channel(&mhi_client->in_handle, &client_info);
if (0 != ret_val)
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_ERROR,
@@ -1266,13 +1268,16 @@ static int mhi_uci_probe(struct platform_device *pdev)
struct mhi_uci_ctxt_t *uci_ctxt;
int ret_val;
int i;
- char node_name[16];
+ char node_name[32];
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
UCI_DBG_INFO,
"Entered with pdev:%p\n",
pdev);
+ if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+ return -EPROBE_DEFER;
+
if (pdev->dev.of_node == NULL)
return -ENODEV;
@@ -1286,7 +1291,7 @@ static int mhi_uci_probe(struct platform_device *pdev)
if (!uci_ctxt)
return -ENOMEM;
- uci_ctxt->client_info.mhi_client_cb = uci_xfer_cb;
+ uci_ctxt->pdev = pdev;
mutex_init(&uci_ctxt->ctrl_mutex);
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
@@ -1309,7 +1314,8 @@ static int mhi_uci_probe(struct platform_device *pdev)
uci_client->uci_ctxt = uci_ctxt;
if (uci_client->in_attr.uci_ownership) {
- ret_val = mhi_register_client(uci_client);
+ ret_val = mhi_register_client(uci_client,
+ &pdev->dev);
if (ret_val) {
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
UCI_DBG_CRITICAL,
@@ -1319,7 +1325,13 @@ static int mhi_uci_probe(struct platform_device *pdev)
return -EIO;
}
- snprintf(node_name, sizeof(node_name), "mhi-uci%d",
+ snprintf(node_name,
+ sizeof(node_name),
+ "mhi_uci_%04x_%02u.%02u.%02u_%d",
+ uci_client->out_handle->dev_id,
+ uci_client->out_handle->domain,
+ uci_client->out_handle->bus,
+ uci_client->out_handle->slot,
uci_client->out_attr.chan_id);
uci_client->uci_ipc_log = ipc_log_context_create
(MHI_UCI_IPC_LOG_PAGES,
@@ -1364,11 +1376,16 @@ static int mhi_uci_probe(struct platform_device *pdev)
}
uci_client->dev =
device_create(mhi_uci_drv_ctxt.mhi_uci_class,
- NULL,
- uci_ctxt->dev_t + i,
- NULL,
- DEVICE_NAME "_pipe_%d",
- uci_client->out_chan);
+ NULL,
+ uci_ctxt->dev_t + i,
+ NULL,
+ DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
+ uci_client->out_handle->dev_id,
+ uci_client->out_handle->domain,
+ uci_client->out_handle->bus,
+ uci_client->out_handle->slot,
+ "_pipe_",
+ uci_client->out_chan);
if (IS_ERR(uci_client->dev)) {
uci_log(uci_client->uci_ipc_log,
UCI_DBG_ERROR,