From e02de095f4c7c12db7ebc973ea7d74e96a7ebfdb Mon Sep 17 00:00:00 2001
From: Jeykumar Sankaran
Date: Mon, 29 Feb 2016 14:47:10 -0800
Subject: msm: mdss: Add support for concurrent writeback

This change adds support for concurrent writeback on supported targets.
The client requests concurrent writeback by selecting the data point in
the output buffer flags.

Change-Id: Ic108ce94daef4f96d1fa27b4057e49c01b9e9b8e
Signed-off-by: Jeykumar Sankaran
Signed-off-by: Ingrid Gallardo
---
 drivers/video/fbdev/msm/mdss.h                    |   1 +
 drivers/video/fbdev/msm/mdss_fb.c                 |  13 +-
 drivers/video/fbdev/msm/mdss_mdp.c                |   5 +-
 drivers/video/fbdev/msm/mdss_mdp.h                |  34 +++++
 drivers/video/fbdev/msm/mdss_mdp_ctl.c            | 108 ++++++++++++++-
 drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c | 155 +++++++++++++++++++++-
 drivers/video/fbdev/msm/mdss_mdp_layer.c          | 112 +++++++++++++++-
 drivers/video/fbdev/msm/mdss_mdp_overlay.c        |  16 +++
 drivers/video/fbdev/msm/mdss_mdp_wfd.c            | 112 ++++++++++++++--
 drivers/video/fbdev/msm/mdss_mdp_wfd.h            |  13 +-
 10 files changed, 532 insertions(+), 37 deletions(-)
(limited to 'drivers/video/fbdev')

diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 12164d31d4b6..f957ee082514 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -174,6 +174,7 @@ enum mdss_hw_capabilities {
 	MDSS_CAPS_QSEED3,
 	MDSS_CAPS_DEST_SCALER,
 	MDSS_CAPS_10_BIT_SUPPORTED,
+	MDSS_CAPS_CWB_SUPPORTED,
 	MDSS_CAPS_MAX,
 };
 
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index d66a578dd1ed..0e26de90900c 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -4374,8 +4374,19 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
 		commit.commit_v1.output_layer = output_layer_user;
 		rc = copy_to_user(argp, &commit,
 				sizeof(struct mdp_layer_commit));
-		if (rc)
+		if (rc) {
 			pr_err("copy to user for release & retire fence failed\n");
+			goto err;
+		}
+	}
+
+	if (output_layer_user) {
+		rc = copy_to_user(&output_layer_user->buffer.fence,
+				&output_layer->buffer.fence,
+				sizeof(int));
+
+		if (rc)
+			pr_err("copy to user for output fence failed");
 	}
 
 err:
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index b2308f9b05b0..6ca1883da1bb 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -222,7 +222,7 @@ static struct mdss_mdp_irq mdp_irq_map[] = {
 	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 2, MDSS_MDP_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
 	{ MDSS_MDP_IRQ_TYPE_PING_PONG_AUTO_REF, 3, MDSS_MDP_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
 	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 2, MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1},
-	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3, MDSS_MDP_INTR2_PING_PONG_2_CWB_OVERFLOW, 1}
+	{ MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, 3, MDSS_MDP_INTR2_PING_PONG_3_CWB_OVERFLOW, 1}
 };
 
 static struct intr_callback *mdp_intr_cb;
@@ -1838,6 +1838,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
 			mdata->mdss_caps_map);
 		set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
 		set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
+		set_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map);
 		mdss_mdp_init_default_prefill_factors(mdata);
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_RIGHT_ONLY_PU);
 		mdss_set_quirk(mdata, MDSS_QUIRK_DSC_2SLICE_PU_THRPUT);
@@ -2346,6 +2347,8 @@ ssize_t mdss_mdp_show_capabilities(struct device *dev,
 		SPRINT(" dest_scaler");
 	if (mdata->has_separate_rotator)
 		SPRINT(" separate_rotator");
+	if (test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map))
+		SPRINT(" concurrent_writeback");
 	SPRINT("\n");
 
 #undef SPRINT
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index f9c708178eac..bedbc10714c1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -339,6 +339,17 @@ struct mdss_mdp_ctl_intfs_ops {
 	int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable);
 };
 
+struct mdss_mdp_cwb {
+	struct mutex queue_lock;
+	struct list_head data_queue;
+	int valid;
+	u32 wb_idx;
+	struct mdp_output_layer *layer;
+	void *priv_data;
+	struct msm_sync_pt_data cwb_sync_pt_data;
+	struct blocking_notifier_head notifier_head;
+};
+
 struct mdss_mdp_ctl {
 	u32 num;
 	char __iomem *base;
@@ -554,6 +565,13 @@ struct mdss_mdp_data {
 	struct mdss_mdp_pipe *last_pipe;
 };
 
+struct mdss_mdp_wb_data {
+	struct mdp_output_layer layer;
+	struct mdss_mdp_data data;
+	bool signal_required;
+	struct list_head next;
+};
+
 struct pp_hist_col_info {
 	u32 col_state;
 	u32 col_en;
@@ -814,6 +832,10 @@ struct mdss_overlay_private {
 	u32 bl_events;
 	u32 ad_events;
 	u32 ad_bl_events;
+
+	struct mdss_mdp_cwb cwb;
+	wait_queue_head_t wb_waitq;
+	atomic_t wb_busy;
 };
 
 struct mdss_mdp_set_ot_params {
@@ -1453,6 +1475,14 @@ void *mdss_mdp_get_intf_base_addr(struct mdss_data_type *mdata,
 		u32 interface_id);
 int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
+void *mdss_mdp_writeback_get_ctx_for_cwb(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_writeback_arg *wb_arg);
+int mdss_mdp_acquire_wb(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_cwb_validate(struct msm_fb_data_type *mfd,
+		struct mdp_output_layer *layer);
+int mdss_mdp_cwb_check_resource(struct mdss_mdp_ctl *ctl, u32 wb_idx);
+
 int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
 		struct mdp_display_commit *data);
 struct mdss_mdp_data *mdss_mdp_overlay_buf_alloc(struct msm_fb_data_type *mfd,
@@ -1689,6 +1719,8 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
 		struct mdss_rect *l_roi, struct mdss_rect *r_roi);
 void mdss_mdp_mixer_update_pipe_map(struct mdss_mdp_ctl *master_ctl,
 		int mixer_mux);
+int mdss_mdp_wb_import_data(struct device *device,
+		struct mdss_mdp_wb_data *wb_data);
 void mdss_mdp_pipe_calc_pixel_extn(struct mdss_mdp_pipe *pipe);
 void mdss_mdp_pipe_calc_qseed3_cfg(struct mdss_mdp_pipe *pipe);
 
@@ -1701,6 +1733,8 @@ int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt);
 int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version);
+int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
+		struct mdp_layer_commit_v1 *commit);
 
 struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata,
 		u32 off);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 6c1b5e13db5f..6626c7eb2326 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3314,6 +3314,100 @@ static int mdss_mdp_ctl_fbc_enable(int enable,
 	return 0;
 }
 
+int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_cwb *cwb = NULL;
+	struct mdss_mdp_writeback *wb = NULL;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct mdss_mdp_wb_data *cwb_data;
+	struct mdss_mdp_writeback_arg wb_args;
+	struct mdss_mdp_ctl *sctl = NULL;
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	u32 opmode, data_point;
+	int rc = 0;
+
+	if (!ctl->mfd)
+		return -ENODEV;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	cwb = &mdp5_data->cwb;
+
+	if (!cwb->valid)
+		return rc;
+
+	/* Wait for previous CWB job to complete */
+	if (mdss_mdp_acquire_wb(ctl))
+		return -EBUSY;
+
+	wb = mdata->wb + cwb->wb_idx;
+	wb->base = mdata->mdss_io.base + mdata->wb_offsets[cwb->wb_idx];
+	ctl->wb = wb;
+
+	/* Get new instance of writeback interface context */
+	cwb->priv_data = mdss_mdp_writeback_get_ctx_for_cwb(ctl);
+	if (cwb->priv_data == NULL) {
+		pr_err("fail to get writeback context\n");
+		rc = -ENOMEM;
+		goto cwb_setup_done;
+	}
+
+	mutex_lock(&cwb->queue_lock);
+	cwb_data = list_first_entry_or_null(&cwb->data_queue,
+			struct mdss_mdp_wb_data, next);
+	mutex_unlock(&cwb->queue_lock);
+	if (cwb_data == NULL) {
+		pr_err("no output buffer for cwb\n");
+		rc = -ENOMEM;
+		goto cwb_setup_done;
+	}
+
+	rc = mdss_mdp_data_map(&cwb_data->data, true, DMA_FROM_DEVICE);
+	if (rc) {
+		pr_err("fail to acquire CWB output buffer\n");
+		goto cwb_setup_done;
+	}
+
+	memset(&wb_args, 0, sizeof(wb_args));
+	wb_args.data = &cwb_data->data;
+
+	rc = mdss_mdp_writeback_prepare_cwb(ctl, &wb_args);
+	if (rc) {
+		pr_err("failed to writeback prepare cwb\n");
+		goto cwb_setup_done;
+	}
+
+	/* Select MEM_SEL to WB */
+	ctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl)
+		sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
+
+	/* Select CWB data point */
+	data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
+	writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]);
+	if (sctl)
+		writel_relaxed(data_point + 1,
+				mdata->mdp_base + mdata->ppb_ctl[3]);
+
+	/* Flush WB */
+	ctl->flush_bits |= BIT(16);
+
+	opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode;
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
+	if (sctl) {
+		opmode = mdss_mdp_ctl_read(sctl, MDSS_MDP_REG_CTL_TOP) |
+				sctl->opmode;
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode);
+	}
+
+cwb_setup_done:
+	cwb->valid = 0;
+	atomic_add_unless(&mdp5_data->wb_busy, -1, 0);
+
+	return 0;
+}
+
 int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_ctl *split_ctl;
@@ -3341,10 +3435,10 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
 	max_mixer_width = ctl->mdata->max_mixer_width;
 
 	split_fb = ((is_dual_lm_single_display(ctl->mfd)) &&
-		(ctl->mfd->split_fb_left <= max_mixer_width) &&
-		(ctl->mfd->split_fb_right <= max_mixer_width)) ? 1 : 0;
+			(ctl->mfd->split_fb_left <= max_mixer_width) &&
+			(ctl->mfd->split_fb_right <= max_mixer_width)) ? 1 : 0;
 	pr_debug("max=%d xres=%d left=%d right=%d\n", max_mixer_width,
-		width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);
+			width, ctl->mfd->split_fb_left, ctl->mfd->split_fb_right);
 
 	if ((split_ctl && (width > max_mixer_width)) ||
 			(width > (2 * max_mixer_width))) {
@@ -3359,7 +3453,7 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
 	if (!ctl->mixer_left) {
 		ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
 				MDSS_MDP_MIXER_TYPE_INTF,
-			((width > max_mixer_width) || split_fb), 0);
+				((width > max_mixer_width) || split_fb), 0);
 		if (!ctl->mixer_left) {
 			pr_err("unable to allocate layer mixer\n");
 			return -ENOMEM;
@@ -5546,6 +5640,12 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
 			!bitmap_empty(mdata->bwc_enable_map, MAX_DRV_SUP_PIPES))
 		mdss_mdp_bwcpanic_ctrl(mdata, true);
 
+	ret = mdss_mdp_cwb_setup(ctl);
+	if (ret)
+		pr_warn("concurrent setup failed ctl=%d\n", ctl->num);
+
+	ctl_flush_bits |= ctl->flush_bits;
+
 	ATRACE_BEGIN("flush_kickoff");
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl_flush_bits);
 	if (sctl && sctl_flush_bits) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 5d9d8f45a965..6fcefb56a739 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -26,6 +26,8 @@
  */
 #define BWC_FMT_MASK	0xC3FFFFFF
 #define MDSS_DEFAULT_OT_SETTING	0x10
+#define CWB_PPB_0	2
+#define CWB_PPB_1	3
 
 enum mdss_mdp_writeback_type {
 	MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
@@ -196,6 +198,57 @@ static int mdss_mdp_writeback_cdm_setup(struct mdss_mdp_writeback_ctx *ctx,
 	return mdss_mdp_cdm_setup(cdm, &setup);
 }
 
+static void mdss_mdp_writeback_cwb_overflow(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_overlay_private *mdp5_data = NULL;
+
+	pr_err("Buffer overflow triggered ctl=%d\n", ctl->num);
+	MDSS_XLOG(ctl->num);
+	if (!ctl->mfd)
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	mdp5_data->cwb.valid = 0;
+
+	blocking_notifier_call_chain(&mdp5_data->cwb.notifier_head,
+			MDP_NOTIFY_FRAME_TIMEOUT, NULL);
+
+	mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, CWB_PPB_0);
+	if (mdss_mdp_get_split_ctl(ctl))
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+				CWB_PPB_1);
+
+	if (!atomic_add_unless(&mdp5_data->wb_busy, -1, 0))
+		pr_err("Invalid state for WB\n");
+
+	wake_up_all(&mdp5_data->wb_waitq);
+}
+
+static void mdss_mdp_writeback_cwb_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	struct mdss_mdp_writeback_ctx *ctx = NULL;
+
+	if (!ctl->mfd)
+		return;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	ctx = mdp5_data->cwb.priv_data;
+	mdp5_data->cwb.valid = 0;
+
+	mdss_mdp_irq_disable_nosync(ctx->intr_type, ctx->intf_num);
+
+	blocking_notifier_call_chain(&mdp5_data->cwb.notifier_head,
+			MDP_NOTIFY_FRAME_DONE, NULL);
+
+	if (!atomic_add_unless(&mdp5_data->wb_busy, -1, 0))
+		pr_err("Invalid state for WB\n");
+
+	wake_up_all(&mdp5_data->wb_waitq);
+}
+
 void mdss_mdp_set_wb_cdp(struct mdss_mdp_writeback_ctx *ctx,
 	struct mdss_mdp_format_params *fmt)
 {
@@ -355,6 +408,63 @@ static int mdss_mdp_writeback_format_setup(struct mdss_mdp_writeback_ctx *ctx,
 	return 0;
 }
 
+int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_writeback_arg *wb_arg)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_writeback_ctx *ctx = NULL;
+	struct mdp_layer_buffer *buffer = NULL;
+	struct mdss_mdp_cwb *cwb = NULL;
+	struct mdss_mdp_ctl *sctl = NULL;
+	int ret = 0;
+
+	mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	cwb = &mdp5_data->cwb;
+	ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data;
+
+	buffer = &cwb->layer->buffer;
+
+	ctx->opmode = 0;
+	ctx->img_width = buffer->width;
+	ctx->img_height = buffer->height;
+	ctx->width = buffer->width;
+	ctx->height = buffer->height;
+	ctx->frame_rate = ctl->frame_rate;
+	ctx->dst_rect.x = 0;
+	ctx->dst_rect.y = 0;
+	ctx->dst_rect.w = ctx->width;
+	ctx->dst_rect.h = ctx->height;
+
+	ret = mdss_mdp_writeback_format_setup(ctx, buffer->format, ctl);
+	if (ret) {
+		pr_err("format setup failed for cwb\n");
+		return ret;
+	}
+
+	ret = mdss_mdp_writeback_addr_setup(ctx, wb_arg->data);
+	if (ret) {
+		pr_err("cwb writeback data setup error\n");
+		return ret;
+	}
+	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+			mdss_mdp_writeback_cwb_intr_done, ctl);
+
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, ctl->intf_num);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+			ctl->intf_num,
+			mdss_mdp_writeback_cwb_overflow, ctl);
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+				sctl->intf_num);
+		mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW,
+				sctl->intf_num,
+				mdss_mdp_writeback_cwb_overflow, sctl);
+	}
+	return ret;
+}
+
 static int mdss_mdp_writeback_prepare_wfd(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
@@ -557,6 +667,7 @@ static void mdss_mdp_writeback_intr_done(void *arg)
 	spin_unlock(&ctx->wb_lock);
 
 	complete_all(&ctx->wb_comp);
+	MDSS_XLOG(ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
 }
 
@@ -822,11 +933,43 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
 	return 0;
 }
 
+static struct mdss_mdp_writeback_ctx *mdss_mdp_writeback_get_ctx(u32 opmode)
+{
+	u32 mem_sel = (opmode & 0xF) - 1;
+
+	if (mem_sel < MDSS_MDP_MAX_WRITEBACK)
+		return &wb_ctx_list[mem_sel];
+	return NULL;
+}
+
+void *mdss_mdp_writeback_get_ctx_for_cwb(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_writeback_ctx *ctx =
+		mdss_mdp_writeback_get_ctx(MDSS_MDP_CTL_OP_WFD_MODE);
+	struct mdss_mdp_writeback_ctx *cwb_ctx =
+		kzalloc(sizeof(struct mdss_mdp_writeback_ctx), GFP_KERNEL);
+
+	if (cwb_ctx == NULL) {
+		pr_err("fail to allocate CWB context\n");
+		return NULL;
+	}
+
+	/* Populate only needed parameters for CWB programming. */
+	cwb_ctx->base = ctl->wb->base;
+	cwb_ctx->wb_num = ctl->wb->num;
+	cwb_ctx->intf_num = ctx->intf_num;
+	cwb_ctx->intr_type = ctx->intr_type;
+	cwb_ctx->xin_id = ctx->xin_id;
+	cwb_ctx->clk_ctrl.reg_off = ctx->clk_ctrl.reg_off;
+	cwb_ctx->clk_ctrl.bit_off = ctx->clk_ctrl.bit_off;
+
+	return cwb_ctx;
+}
+
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_writeback_ctx *ctx;
 	struct mdss_mdp_writeback *wb;
-	u32 mem_sel;
 	u32 mixer_type = MDSS_MDP_MIXER_TYPE_UNUSED;
 	bool is_rot;
 
@@ -838,16 +981,16 @@ int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl)
 	}
 
 	wb = ctl->wb;
-	mem_sel = (ctl->opmode & 0xF) - 1;
-	if (mem_sel < MDSS_MDP_MAX_WRITEBACK) {
-		ctx = &wb_ctx_list[mem_sel];
+
+	ctx = mdss_mdp_writeback_get_ctx(ctl->opmode);
+	if (ctx) {
 		if (ctx->ref_cnt) {
-			pr_err("writeback in use %d\n", mem_sel);
+			pr_err("writeback id: %d in use\n", wb->num);
 			return -EBUSY;
 		}
 		ctx->ref_cnt++;
 	} else {
-		pr_err("invalid writeback mode %d\n", mem_sel);
+		pr_err("unable to get wb context for wb id: %d\n", wb->num);
 		return -EINVAL;
 	}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index bf7e65beb68c..8da8840f30ec 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -41,6 +41,7 @@
 enum {
 	MDSS_MDP_RELEASE_FENCE = 0,
 	MDSS_MDP_RETIRE_FENCE,
+	MDSS_MDP_CWB_RETIRE_FENCE,
 };
 
 enum layer_pipe_q {
@@ -724,9 +725,13 @@ static struct sync_fence *__create_fence(struct msm_fb_data_type *mfd,
 	if (fence_type == MDSS_MDP_RETIRE_FENCE)
 		snprintf(fence_name, sizeof(fence_name), "fb%d_retire",
 			mfd->index);
-	else
+	else if (fence_type == MDSS_MDP_RELEASE_FENCE)
 		snprintf(fence_name, sizeof(fence_name), "fb%d_release",
 			mfd->index);
+	else if (fence_type == MDSS_MDP_CWB_RETIRE_FENCE)
+		snprintf(fence_name, sizeof(fence_name), "cwb%d_retire",
+			mfd->index);
+
 
 	if ((fence_type == MDSS_MDP_RETIRE_FENCE) &&
 		(mfd->panel.type == MIPI_CMD_PANEL)) {
@@ -738,9 +743,12 @@ static struct sync_fence *__create_fence(struct msm_fb_data_type *mfd,
 		} else {
 			return ERR_PTR(-EPERM);
 		}
+	} else if (fence_type == MDSS_MDP_CWB_RETIRE_FENCE) {
+		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
+				fence_name, sync_pt_data->timeline_value + 1);
 	} else {
 		sync_fence = mdss_fb_sync_get_fence(sync_pt_data->timeline,
-			fence_name, value);
+				fence_name, value);
 	}
 
 	if (IS_ERR_OR_NULL(sync_fence)) {
@@ -1914,6 +1922,19 @@ end:
 	return ret;
 }
 
+int __is_cwb_requested(uint32_t output_layer_flags)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	int req = 0;
+
+	req = output_layer_flags & MDP_COMMIT_CWB_EN;
+	if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) {
+		pr_err("CWB not supported");
+		return -ENODEV;
+	}
+	return req;
+}
+
 /*
  * mdss_mdp_layer_pre_commit() - pre commit validation for input layers
  * @mfd: Framebuffer data structure for display
@@ -1944,6 +1965,20 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
 	if (!mdp5_data || !mdp5_data->ctl)
 		return -EINVAL;
 
+	if (commit->output_layer) {
+		ret = __is_cwb_requested(commit->output_layer->flags);
+		if (IS_ERR_VALUE(ret)) {
+			return ret;
+		} else if (ret) {
+			ret = mdss_mdp_layer_pre_commit_cwb(mfd, commit);
+			if (ret) {
+				pr_err("pre commit failed for CWB\n");
+				return ret;
+			}
+		}
+	}
+
 	layer_list = commit->input_layers;
 
 	/* handle null commit */
@@ -2018,6 +2053,22 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
 	}
 
 	ret = __handle_buffer_fences(mfd, commit, layer_list);
+	if (ret) {
+		pr_err("failed to handle fences for fb: %d", mfd->index);
+		goto map_err;
+	}
+
+	if (mdp5_data->cwb.valid) {
+		struct sync_fence *retire_fence = NULL;
+
+		retire_fence = __create_fence(mfd,
+				&mdp5_data->cwb.cwb_sync_pt_data,
+				MDSS_MDP_CWB_RETIRE_FENCE,
+				&commit->output_layer->buffer.fence, 0);
+		if (IS_ERR_OR_NULL(retire_fence)) {
+			pr_err("failed to handle cwb fence");
+		}
+	}
 
 map_err:
 	if (ret) {
@@ -2046,6 +2097,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
 	struct file *file, struct mdp_layer_commit_v1 *commit)
 {
 	struct mdss_overlay_private *mdp5_data;
+	int rc = 0;
 
 	if (!mfd || !commit) {
 		pr_err("invalid input params\n");
@@ -2065,9 +2117,63 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
 		return -EPERM;
 	}
 
+	if (commit->output_layer) {
+		rc = __is_cwb_requested(commit->output_layer->flags);
+		if (IS_ERR_VALUE(rc)) {
+			return rc;
+		} else if (rc) {
+			rc = mdss_mdp_cwb_validate(mfd, commit->output_layer);
+			if (rc) {
+				pr_err("failed to validate CWB config!!!\n");
+				return rc;
+			}
+		}
+	}
+
 	return __validate_layers(mfd, file, commit);
 }
 
+int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
+	struct mdp_layer_commit_v1 *commit)
+{
+	struct mdss_overlay_private *mdp5_data;
+	struct mdss_mdp_wb_data *cwb_data = NULL;
+	int rc = 0;
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+
+	rc = mdss_mdp_cwb_check_resource(mfd_to_ctl(mfd),
+			commit->output_layer->writeback_ndx);
+	if (rc) {
+		pr_err("CWB resource not available\n");
+		return rc;
+	}
+
+	/* Add data to the cwb queue */
+	cwb_data = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
+	if (!cwb_data)
+		return -ENOMEM;
+
+	cwb_data->layer = *commit->output_layer;
+	rc = mdss_mdp_wb_import_data(&mfd->pdev->dev, cwb_data);
+	if (rc) {
+		pr_err("failed to import data for cwb\n");
+		kfree(cwb_data);
+		return rc;
+	}
+
+	mdp5_data->cwb.layer = commit->output_layer;
+	mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx;
+
+	mutex_lock(&mdp5_data->cwb.queue_lock);
+	list_add_tail(&cwb_data->next, &mdp5_data->cwb.data_queue);
+	mutex_unlock(&mdp5_data->cwb.queue_lock);
+
+	mdp5_data->cwb.valid = 1;
+
+	return 0;
+}
+
 int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
 	struct file *file, struct mdp_layer_commit_v1 *commit)
 {
@@ -2075,7 +2181,7 @@ int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
 	struct mdss_overlay_private *mdp5_data;
 	struct mdss_mdp_wfd *wfd = NULL;
 	struct mdp_output_layer *output_layer = NULL;
-	struct mdss_mdp_wfd_data *data = NULL;
+	struct mdss_mdp_wb_data *data = NULL;
 	struct sync_fence *fence = NULL;
 	struct msm_sync_pt_data *sync_pt_data = NULL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index d409618ae6c0..5d20357df14b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -5430,6 +5430,7 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
 	struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
 	struct mdss_overlay_private *mdp5_data = NULL;
 	struct irq_info *mdss_irq;
+	char timeline_name[32];
 	int rc;
 
 	mdp5_data = kzalloc(sizeof(struct mdss_overlay_private), GFP_KERNEL);
@@ -5491,6 +5492,21 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
 	mdp5_data->cursor_ndx[CURSOR_PIPE_LEFT] = MSMFB_NEW_REQUEST;
 	mdp5_data->cursor_ndx[CURSOR_PIPE_RIGHT] = MSMFB_NEW_REQUEST;
 
+	init_waitqueue_head(&mdp5_data->wb_waitq);
+	mutex_init(&mdp5_data->cwb.queue_lock);
+	INIT_LIST_HEAD(&mdp5_data->cwb.data_queue);
+
+	snprintf(timeline_name, sizeof(timeline_name), "cwb%d", mfd->index);
+	mdp5_data->cwb.cwb_sync_pt_data.timeline =
+		sw_sync_timeline_create(timeline_name);
+	if (mdp5_data->cwb.cwb_sync_pt_data.timeline == NULL) {
+		pr_err("failed to create sync pt timeline for cwb\n");
+		return -ENOMEM;
+	}
+
+	blocking_notifier_chain_register(&mdp5_data->cwb.notifier_head,
+		&mfd->mdp_sync_pt_data.notifier);
+
 	mfd->mdp.private1 = mdp5_data;
 	mfd->wait_for_kickoff = true;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.c b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
index b1c414f53621..656967831810 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_wfd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.c
@@ -48,7 +48,7 @@ struct mdss_mdp_wfd *mdss_mdp_wfd_init(struct device *device,
 
 void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd)
 {
-	struct mdss_mdp_wfd_data *node, *temp;
+	struct mdss_mdp_wb_data *node, *temp;
 
 	list_for_each_entry_safe(node, temp, &wfd->data_queue, next)
 		mdss_mdp_wfd_remove_data(wfd, node);
@@ -56,6 +56,27 @@ void mdss_mdp_wfd_deinit(struct mdss_mdp_wfd *wfd)
 	kfree(wfd);
 }
 
+int mdss_mdp_acquire_wb(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+	int rc, ret = 0;
+
+	if (atomic_read(&mdp5_data->wb_busy)) {
+		rc = wait_event_timeout(mdp5_data->wb_waitq,
+			atomic_read(&mdp5_data->wb_busy) == 0, KOFF_TIMEOUT);
+		if (!rc) {
+			pr_err("%s: Wait for WB timed out. wb_busy=%d",
+				__func__, atomic_read(&mdp5_data->wb_busy));
+			ret = -ETIMEDOUT;
+		} else if (!atomic_read(&mdp5_data->wb_busy))
+			ret = 0;
+	}
+
+	if (!ret)
+		atomic_inc(&mdp5_data->wb_busy);
+	return ret;
+}
+
 int mdss_mdp_wfd_wait_for_finish(struct mdss_mdp_wfd *wfd)
 {
 	int ret;
@@ -261,8 +282,9 @@ wfd_setup_error:
 	return ret;
 }
 
+
-static int mdss_mdp_wfd_import_data(struct device *device,
-	struct mdss_mdp_wfd_data *wfd_data)
+int mdss_mdp_wb_import_data(struct device *device,
+	struct mdss_mdp_wb_data *wfd_data)
 {
 	int i, ret = 0;
 	u32 flags = 0;
@@ -287,26 +309,26 @@ static int mdss_mdp_wfd_import_data(struct device *device,
 	return ret;
 }
 
-struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+struct mdss_mdp_wb_data *mdss_mdp_wfd_add_data(
 	struct mdss_mdp_wfd *wfd,
 	struct mdp_output_layer *layer)
 {
 	int ret;
-	struct mdss_mdp_wfd_data *wfd_data;
+	struct mdss_mdp_wb_data *wfd_data;
 
 	if (!wfd->ctl || !wfd->ctl->wb) {
 		pr_err("wfd not setup\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	wfd_data = kzalloc(sizeof(struct mdss_mdp_wfd_data), GFP_KERNEL);
+	wfd_data = kzalloc(sizeof(struct mdss_mdp_wb_data), GFP_KERNEL);
 	if (!wfd_data) {
 		pr_err("fail to allocate wfd data\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
 	wfd_data->layer = *layer;
-	ret = mdss_mdp_wfd_import_data(wfd->device, wfd_data);
+	ret = mdss_mdp_wb_import_data(wfd->device, wfd_data);
 	if (ret) {
 		pr_err("fail to import data\n");
 		mdss_mdp_data_free(&wfd_data->data, true, DMA_FROM_DEVICE);
@@ -322,7 +344,7 @@ struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
 }
 
 void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
-	struct mdss_mdp_wfd_data *wfd_data)
+	struct mdss_mdp_wb_data *wfd_data)
 {
 	mutex_lock(&wfd->lock);
 	list_del_init(&wfd_data->next);
@@ -357,6 +379,65 @@ static int mdss_mdp_wfd_validate_out_configuration(struct mdss_mdp_wfd *wfd,
 	return 0;
 }
 
+int mdss_mdp_cwb_check_resource(struct mdss_mdp_ctl *ctl, u32 wb_idx)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	struct mdss_mdp_mixer *cwb_mixer = NULL;
+	struct mdss_mdp_writeback *wb = NULL;
+
+	if (!mdata) {
+		pr_err("Invalid mdata\n");
+		return -EINVAL;
+	}
+
+	if (wb_idx != (mdata->nwb - 1)) {
+		pr_err("Invalid wb index for cwb: %d\n", wb_idx);
+		return -EINVAL;
+	}
+
+	wb = mdata->wb + wb_idx;
+	if (atomic_read(&wb->kref.refcount)) {
+		pr_err("WB block busy\n");
+		return -EBUSY;
+	}
+
+	/* CWB path is hardwired from mixer0 to mixer2 and mixer1 to mixer5 */
+	cwb_mixer = mdata->mixer_intf + ctl->mixer_left->num + 2;
+	if (cwb_mixer->ref_cnt) {
+		pr_err("mixer 2 is busy\n");
+		return -EBUSY;
+	}
+
+	if (ctl->mixer_right) {
+		cwb_mixer = mdata->mixer_intf + ctl->mixer_right->num + 2;
+		if (cwb_mixer->ref_cnt) {
+			pr_err("mixer 5 is busy\n");
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+int mdss_mdp_cwb_validate(struct msm_fb_data_type *mfd,
+	struct mdp_output_layer *layer)
+{
+	struct mdss_mdp_format_params *fmt = NULL;
+	int rc = 0;
+
+	rc = mdss_mdp_cwb_check_resource(mfd_to_ctl(mfd),
+			layer->writeback_ndx);
+	if (rc)
+		return rc;
+
+	fmt = mdss_mdp_get_format_params(layer->buffer.format);
+	if (!(fmt->flag & VALID_MDP_WB_INTF_FORMAT)) {
+		pr_err("wb does not support dst fmt:%d\n",
+			layer->buffer.format);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
 	struct mdp_output_layer *layer)
 {
@@ -378,7 +459,8 @@ int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
 {
 	struct mdss_mdp_ctl *ctl = wfd->ctl;
 	struct mdss_mdp_writeback_arg wb_args;
-	struct mdss_mdp_wfd_data *wfd_data;
+	struct mdss_mdp_wb_data *wfd_data;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(ctl->mfd);
 	int ret = 0;
 
 	if (!ctl) {
@@ -391,6 +473,12 @@ int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
 		return -EINVAL;
 	}
 
+	if (mdp5_data->cwb.valid) {
+		pr_err("Skipping the frame as WB is in use\n");
+		mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_DONE);
+		return 0;
+	}
+
 	mutex_lock(&wfd->lock);
 	if (list_empty(&wfd->data_queue)) {
 		pr_debug("no output buffer\n");
@@ -399,7 +487,7 @@ int mdss_mdp_wfd_kickoff(struct mdss_mdp_wfd *wfd,
 		return 0;
 	}
 	wfd_data = list_first_entry(&wfd->data_queue,
-				struct mdss_mdp_wfd_data, next);
+				struct mdss_mdp_wb_data, next);
 	mutex_unlock(&wfd->lock);
 
 	ret = mdss_mdp_data_map(&wfd_data->data, true, DMA_FROM_DEVICE);
@@ -435,7 +523,7 @@ kickoff_error:
 
 int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd)
 {
-	struct mdss_mdp_wfd_data *wfd_data;
+	struct mdss_mdp_wb_data *wfd_data;
 
 	mutex_lock(&wfd->lock);
 	if (list_empty(&wfd->data_queue)) {
@@ -444,7 +532,7 @@ int mdss_mdp_wfd_commit_done(struct mdss_mdp_wfd *wfd)
 		return -EINVAL;
 	}
 	wfd_data = list_first_entry(&wfd->data_queue,
-				struct mdss_mdp_wfd_data, next);
+				struct mdss_mdp_wb_data, next);
 	mutex_unlock(&wfd->lock);
 
 	mdss_mdp_wfd_remove_data(wfd, wfd_data);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wfd.h b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
index 704597bdfbfa..4261205ca74f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_wfd.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_wfd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,13 +22,6 @@
 
 #include "mdss_mdp.h"
 
-struct mdss_mdp_wfd_data {
-	struct mdp_output_layer layer;
-	struct mdss_mdp_data data;
-	bool signal_required;
-	struct list_head next;
-};
-
 struct mdss_mdp_wfd {
 	struct mutex lock;
 	struct list_head data_queue;
@@ -47,12 +40,12 @@ int mdss_mdp_wfd_setup(struct mdss_mdp_wfd *wfd,
 
 void mdss_mdp_wfd_destroy(struct mdss_mdp_wfd *wfd);
 
-struct mdss_mdp_wfd_data *mdss_mdp_wfd_add_data(
+struct mdss_mdp_wb_data *mdss_mdp_wfd_add_data(
 	struct mdss_mdp_wfd *wfd,
 	struct mdp_output_layer *layer);
 
 void mdss_mdp_wfd_remove_data(struct mdss_mdp_wfd *wfd,
-	struct mdss_mdp_wfd_data *data);
+	struct mdss_mdp_wb_data *data);
 
 int mdss_mdp_wfd_validate(struct mdss_mdp_wfd *wfd,
 	struct mdp_output_layer *layer);
-- 
cgit v1.2.3
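
Usage sketch: the patch above lets a client request concurrent writeback, and pick the capture data point, through the output layer flags of an atomic commit (MDP_COMMIT_CWB_EN, MDP_COMMIT_CWB_DSPP). The minimal user-space fragment below is an illustration only, not part of the commit; it assumes the MSMFB_ATOMIC_COMMIT ioctl and the struct mdp_layer_commit / struct mdp_output_layer definitions from the msm_mdp_ext.h UAPI header, and it leaves out buffer allocation, input layers and error handling.

/*
 * Illustrative sketch only -- not part of the patch above. Header path,
 * ioctl name and field layout are assumed from the msm_mdp_ext.h UAPI.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp_ext.h>

static int request_concurrent_writeback(int fb_fd,
		struct mdp_output_layer *out_layer, uint32_t wb_ndx)
{
	struct mdp_layer_commit commit;

	memset(&commit, 0, sizeof(commit));
	commit.version = MDP_COMMIT_VERSION_1_0;

	/* Request CWB and tap the data point after the DSPP */
	out_layer->flags = MDP_COMMIT_CWB_EN | MDP_COMMIT_CWB_DSPP;
	/* Must be the last writeback block, see mdss_mdp_cwb_check_resource() */
	out_layer->writeback_ndx = wb_ndx;
	out_layer->buffer.fence = -1;

	commit.commit_v1.output_layer = out_layer;
	/* input layers for the frame are set up elsewhere */

	if (ioctl(fb_fd, MSMFB_ATOMIC_COMMIT, &commit) < 0)
		return -1;

	/* on success the driver fills in the CWB retire fence */
	return out_layer->buffer.fence;
}

On a successful commit the driver copies the CWB retire fence back into the output layer's buffer.fence (see the mdss_fb_atomic_commit_ioctl() change above), which the client can wait on before reusing the output buffer.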