diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 3e9de157cefbc8..cee1b20301819e 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -492,6 +492,16 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
+		speedbin_efuse_version: speedbin@6 {
+			reg = <0x6 0x1>;
+			bits = <2 2>;
+		};
+
+		speedbin_efuse: speedbin@c {
+			reg = <0xc 0x1>;
+			bits = <2 3>;
+		};
+
 		tsens_base1: base1@d0 {
 			reg = <0xd0 0x1>;
 			bits = <0 7>;
@@ -1828,14 +1838,53 @@
 			iommus = <&gpu_iommu 1>, <&gpu_iommu 2>;
 			status = "disabled";
 
+			// opp-avg-kBps / opp-peak-kBps:
+			// need CONFIG_INTERCONNECT=y working, it's bugged:
+			// interconnects = <&bimc MASTER_GRAPHICS_3D &bimc SLAVE_EBI_CH0>;
+			// interconnect-names = "gfx-mem";
+
+			// With a speed bin defined, every OPP must carry opp-supported-hw;
+			// a fuse value of 0 is reported by the driver as version 0x1
+			nvmem-cells = <&speedbin_efuse_version>, <&speedbin_efuse>;
+			nvmem-cell-names = "speed_bin_version", "speed_bin";
+
 			gpu_opp_table: opp-table {
 				compatible = "operating-points-v2";
 
+				opp-465000000 {
+					opp-hz = /bits/ 64 <465000000>;
+					opp-supported-hw = <0x02>;
+					// interconnect:
+					// opp-avg-kBps = <1190400>;
+					// opp-peak-kBps = <4761600>;
+				};
 				opp-400000000 {
 					opp-hz = /bits/ 64 <400000000>;
+					opp-supported-hw = <0xffffffff>;
+					// opp-avg-kBps = <1066000>;
+					// opp-peak-kBps = <4264000>;
+				};
+				opp-310000000 {
+					opp-hz = /bits/ 64 <310000000>;
+					opp-supported-hw = <0xffffffff>;
+					// opp-avg-kBps = <800000>;
+					// opp-peak-kBps = <3200000>;
+				};
+				opp-200000000 {
+					opp-hz = /bits/ 64 <200000000>;
+					opp-supported-hw = <0xffffffff>;
+					// opp-avg-kBps = <160000>;
+					// opp-peak-kBps = <1600000>;
+				};
+				opp-100000000 {
+					opp-hz = /bits/ 64 <100000000>;
+					opp-supported-hw = <0xffffffff>;
 				};
 				opp-19200000 {
 					opp-hz = /bits/ 64 <19200000>;
+					opp-supported-hw = <0xffffffff>;
+					// opp-avg-kBps = <0>;
+					// opp-peak-kBps = <0>;
 				};
 			};
 		};
diff --git a/drivers/clk/qcom/gcc-msm8916.c 
b/drivers/clk/qcom/gcc-msm8916.c
index 9c7c6b23ce3271..556365a096f0e5 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -191,7 +191,7 @@ static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
 	{ P_XO, 0 },
 	{ P_GPLL0_AUX, 3 },
 	{ P_GPLL1, 1 },
-	{ P_GPLL2_AUX, 2 },
+	{ P_GPLL2, 2 },
 };
 
 static const struct clk_parent_data gcc_xo_gpll0a_gpll1_gpll2a[] = {
@@ -488,6 +488,7 @@ static const struct freq_tbl ftbl_gcc_oxili_gfx3d_clk[] = {
 	F(294912000, P_GPLL1, 3, 0, 0),
 	F(310000000, P_GPLL2, 3, 0, 0),
 	F(400000000, P_GPLL0_AUX, 2, 0, 0),
+	F(465000000, P_GPLL2, 2, 0, 0),
 	{ }
 };
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 474d81831ad36b..383b875b1a6aec 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -1766,6 +1766,27 @@
 }
 static DEVICE_ATTR_RW(trans_stat);
 
+static ssize_t load_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct devfreq *df = to_devfreq(dev);
+
+	if (!df->profile)
+		return -EINVAL;
+
+	unsigned long total_time = df->last_status.total_time;
+	if (total_time) {
+		uint32_t load = 1000 * df->last_status.busy_time / total_time;
+		return sprintf(buf, "%u.%u %%%s\n",
+			load / 10, load % 10,
+			(df->stop_polling ? 
" suspend" : "") + ); + } + + return sprintf(buf, "?\n"); +} +static DEVICE_ATTR_RO(load); + static struct attribute *devfreq_attrs[] = { &dev_attr_name.attr, &dev_attr_governor.attr, @@ -1776,6 +1797,7 @@ static struct attribute *devfreq_attrs[] = { &dev_attr_min_freq.attr, &dev_attr_max_freq.attr, &dev_attr_trans_stat.attr, + &dev_attr_load.attr, NULL, }; ATTRIBUTE_GROUPS(devfreq); diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index c86b377f6f0d72..0edc5fcd71bb6c 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -7,6 +7,7 @@ */ #include "a3xx_gpu.h" +#include #define A3XX_INT0_MASK \ (A3XX_INT0_RBBM_AHB_ERROR | \ @@ -481,7 +482,8 @@ static u64 a3xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate) { u64 busy_cycles; - busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO); + busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_PWR_1_LO); + //busy_cycles = gpu_read64(gpu, REG_A3XX_RBBM_PERFCTR_RBBM_1_LO); *out_sample_rate = clk_get_rate(gpu->core_clk); return busy_cycles; @@ -493,6 +495,43 @@ static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return ring->memptrs->rptr; } +static int a3xx_set_supported_hw(struct device *dev, struct adreno_gpu *adreno_gpu) +{ + u32 speedbin, version; + int ret; + + ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin_version", &version); + /* + * -ENOENT means that the platform doesn't support speedbin which is + * fine + */ + if (ret == -ENOENT) { + return 0; + } else if (ret) { + dev_err_probe(dev, ret, + "failed to read speed-bin-version. Some OPPs may not be supported by hardware\n"); + return ret; + } + + ret = nvmem_cell_read_variable_le_u32(dev, "speed_bin", &speedbin); + if (ret) { + dev_err_probe(dev, ret, + "failed to read speed-bin. 
Some OPPs may not be supported by hardware\n"); + return ret; + } + dev_info(dev, "speed-bin version: %u value: %u\n", version, speedbin); + + // if (version > 0) + // we must set at least version 0x1 otherwise no opp gets selected + // having opp-supported-hw property + if (speedbin == 0) + speedbin = 0x1; + + return devm_pm_opp_set_supported_hw(dev, &speedbin, 1); + + return 0; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -554,6 +593,9 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = a3xx_registers; + /* speed-bin */ + a3xx_set_supported_hw(&pdev->dev, adreno_gpu); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 1a5d4f1c8b422b..383ec593c95d8e 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -35,7 +35,6 @@ static void deadline_work(struct kthread_work *work) if (msm_fence_completed(fctx, fctx->next_deadline_fence)) return; - msm_devfreq_boost(fctx2gpu(fctx), 2); } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 7f64c66673002f..a348f92d9acc4a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -57,31 +57,44 @@ static int disable_pwrrail(struct msm_gpu *gpu) static int enable_clk(struct msm_gpu *gpu) { + int ret; + ret = clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); + if (ret) + return ret; + /* + devfreq_resume_device() can do set_rate + if an "opp-suspend" exists, + I actually see no need ... 
+ if (gpu->core_clk && gpu->fast_rate) dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate); + */ /* Set the RBBM timer rate to 19.2Mhz */ if (gpu->rbbmtimer_clk) clk_set_rate(gpu->rbbmtimer_clk, 19200000); - return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks); + return 0; } static int disable_clk(struct msm_gpu *gpu) { - clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); - /* * Set the clock to a deliberately low rate. On older targets the clock * speed had to be non zero to avoid problems. On newer targets this * will be rounded down to zero anyway so it all works out. - */ + + Can declare "opp-suspend" if theres need ... + if (gpu->core_clk) - dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000); + dev_pm_opp_set_rate(&gpu->pdev->dev, 1); + */ if (gpu->rbbmtimer_clk) clk_set_rate(gpu->rbbmtimer_clk, 0); + clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks); + return 0; } @@ -101,6 +114,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + dev_dbg(&gpu->pdev->dev, "%s\n", __func__); trace_msm_gpu_resume(0); ret = enable_pwrrail(gpu); @@ -118,6 +132,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu) msm_devfreq_resume(gpu); gpu->needs_hw_init = true; + dev_dbg(&gpu->pdev->dev, "%s done\n", __func__); return 0; } @@ -127,6 +142,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) int ret; DBG("%s", gpu->name); + dev_dbg(&gpu->pdev->dev, "%s\n", __func__); trace_msm_gpu_suspend(0); msm_devfreq_suspend(gpu); @@ -144,6 +160,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu) return ret; gpu->suspend_count++; + dev_dbg(&gpu->pdev->dev, "%s done\n", __func__); return 0; } @@ -682,7 +699,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, gpu->active_submits--; WARN_ON(gpu->active_submits < 0); if (!gpu->active_submits) { - msm_devfreq_idle(gpu); pm_runtime_put_autosuspend(&gpu->pdev->dev); } @@ -771,7 +787,6 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) 
mutex_lock(&gpu->active_lock); if (!gpu->active_submits) { pm_runtime_get(&gpu->pdev->dev); - msm_devfreq_active(gpu); } gpu->active_submits++; mutex_unlock(&gpu->active_lock); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 4252e3839fbc83..a04c52df105bb9 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -113,25 +113,6 @@ struct msm_gpu_devfreq { /** lock: lock for "suspended", "busy_cycles", and "time" */ struct mutex lock; - /** - * idle_freq: - * - * Shadow frequency used while the GPU is idle. From the PoV of - * the devfreq governor, we are continuing to sample busyness and - * adjust frequency while the GPU is idle, but we use this shadow - * value as the GPU is actually clamped to minimum frequency while - * it is inactive. - */ - unsigned long idle_freq; - - /** - * boost_constraint: - * - * A PM QoS constraint to boost min freq for a period of time - * until the boost expires. - */ - struct dev_pm_qos_request boost_freq; - /** * busy_cycles: Last busy counter value, for calculating elapsed busy * cycles since last sampling period. @@ -141,23 +122,9 @@ struct msm_gpu_devfreq { /** time: Time of last sampling period. */ ktime_t time; - /** idle_time: Time of last transition to idle: */ - ktime_t idle_time; - - /** - * idle_work: - * - * Used to delay clamping to idle freq on active->idle transition. 
- */ - struct msm_hrtimer_work idle_work; - - /** - * boost_work: - * - * Used to reset the boost_constraint after the boost period has - * elapsed - */ - struct msm_hrtimer_work boost_work; + /** load avg **/ + unsigned long load; + unsigned long load_avg; /** suspended: tracks if we're suspended */ bool suspended; @@ -251,6 +218,7 @@ struct msm_gpu { /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */ +// #define DRM_MSM_INACTIVE_PERIOD 120 #define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3 @@ -638,9 +606,6 @@ void msm_devfreq_init(struct msm_gpu *gpu); void msm_devfreq_cleanup(struct msm_gpu *gpu); void msm_devfreq_resume(struct msm_gpu *gpu); void msm_devfreq_suspend(struct msm_gpu *gpu); -void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor); -void msm_devfreq_active(struct msm_gpu *gpu); -void msm_devfreq_idle(struct msm_gpu *gpu); int msm_gpu_hw_init(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index ea70c1c32d9401..ad54086e77002f 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -15,6 +15,7 @@ /* * Power Management: */ +static unsigned long get_freq(struct msm_gpu *gpu); static int msm_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) @@ -23,25 +24,32 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, struct msm_gpu_devfreq *df = &gpu->devfreq; struct dev_pm_opp *opp; + // opp-suspend kacke .... 
+	if (df->suspended)
+		dev_err(dev, "%s while suspended ??\n", __func__);
+
+	unsigned long curr_freq = get_freq(gpu);
+	if (*freq == curr_freq)
+		return 0;
+
 	/*
 	 * Note that devfreq_recommended_opp() can modify the freq
 	 * to something that actually is in the opp table:
 	 */
+	unsigned long fff = *freq;
 	opp = devfreq_recommended_opp(dev, freq, flags);
 	if (IS_ERR(opp))
 		return PTR_ERR(opp);
 
-	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
-
-	/*
-	 * If the GPU is idle, devfreq is not aware, so just stash
-	 * the new target freq (to use when we return to active)
-	 */
-	if (df->idle_freq) {
-		df->idle_freq = *freq;
-		dev_pm_opp_put(opp);
+	if (*freq == curr_freq) {
+		dev_pm_opp_put(opp); /* drop the ref taken by devfreq_recommended_opp() */
 		return 0;
 	}
+
+	dev_dbg(dev, "%s %lu => %lu (%lu)\n", __func__, get_freq(gpu), *freq, fff);
+
+	trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
 
 	if (gpu->funcs->gpu_set_freq) {
 		mutex_lock(&df->lock);
@@ -58,16 +66,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 
 static unsigned long get_freq(struct msm_gpu *gpu)
 {
-	struct msm_gpu_devfreq *df = &gpu->devfreq;
-
-	/*
-	 * If the GPU is idle, use the shadow/saved freq to avoid
-	 * confusing devfreq (which is unaware that we are switching
-	 * to lowest freq until the device is active again)
-	 */
-	if (df->idle_freq)
-		return df->idle_freq;
-
 	if (gpu->funcs->gpu_get_freq)
 		return gpu->funcs->gpu_get_freq(gpu);
 
@@ -90,7 +88,7 @@ static int msm_devfreq_get_dev_status(struct device *dev,
 	status->total_time = ktime_us_delta(time, df->time);
 	df->time = time;
 
-	if (df->suspended) {
+	if ((df->suspended) || (status->total_time == 0)) {
 		mutex_unlock(&df->lock);
 		status->busy_time = 0;
 		return 0;
@@ -100,14 +98,36 @@ static int msm_devfreq_get_dev_status(struct device *dev,
 
 	busy_time = busy_cycles - df->busy_cycles;
 	df->busy_cycles = busy_cycles;
-	mutex_unlock(&df->lock);
 
 	busy_time *= USEC_PER_SEC;
 	busy_time = div64_ul(busy_time, sample_rate);
 	if (WARN_ON(busy_time > ~0LU))
 		busy_time = ~0LU;
 
-	
status->busy_time = busy_time; + /* +running avg: +a=((a*2) + (p+c))/4; p=c; a +load per mille: + */ + unsigned long load = busy_time*1000 / status->total_time; + df->load_avg = ((df->load_avg * 2) + (df->load + load)) / 4; + df->load = load; + + status->busy_time = df->load_avg; + status->total_time = 1000; + + mutex_unlock(&df->lock); + + // status->busy_time = busy_time; + + dev_dbg(&gpu->pdev->dev, + "busy %lu / total %lu = %lu | freq %lu MHz load: %lu | srate: %lu\n", + status->busy_time, status->total_time, + status->busy_time * 100 / status->total_time, + status->current_frequency / 1000 / 1000, + load, + sample_rate); + return 0; } @@ -127,9 +147,6 @@ static struct devfreq_dev_profile msm_devfreq_profile = { .get_cur_freq = msm_devfreq_get_cur_freq, }; -static void msm_devfreq_boost_work(struct kthread_work *work); -static void msm_devfreq_idle_work(struct kthread_work *work); - static bool has_devfreq(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; @@ -151,14 +168,11 @@ void msm_devfreq_init(struct msm_gpu *gpu) * where due to stalling waiting for vblank we could get stuck * at (for ex) 30fps at 50% utilization. 
*/ - priv->gpu_devfreq_config.upthreshold = 50; - priv->gpu_devfreq_config.downdifferential = 10; + priv->gpu_devfreq_config.upthreshold = 80; + priv->gpu_devfreq_config.downdifferential = 20; mutex_init(&df->lock); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, - DEV_PM_QOS_MIN_FREQUENCY, 0); - msm_devfreq_profile.initial_freq = gpu->fast_rate; /* @@ -176,7 +190,6 @@ void msm_devfreq_init(struct msm_gpu *gpu) if (IS_ERR(df->devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); - dev_pm_qos_remove_request(&df->boost_freq); df->devfreq = NULL; return; } @@ -190,22 +203,6 @@ void msm_devfreq_init(struct msm_gpu *gpu) gpu->cooling = NULL; } - msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); - msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); -} - -static void cancel_idle_work(struct msm_gpu_devfreq *df) -{ - hrtimer_cancel(&df->idle_work.timer); - kthread_cancel_work_sync(&df->idle_work.work); -} - -static void cancel_boost_work(struct msm_gpu_devfreq *df) -{ - hrtimer_cancel(&df->boost_work.timer); - kthread_cancel_work_sync(&df->boost_work.work); } void msm_devfreq_cleanup(struct msm_gpu *gpu) @@ -215,8 +212,8 @@ void msm_devfreq_cleanup(struct msm_gpu *gpu) if (!has_devfreq(gpu)) return; + devm_devfreq_remove_device(&gpu->pdev->dev, df->devfreq); devfreq_cooling_unregister(gpu->cooling); - dev_pm_qos_remove_request(&df->boost_freq); } void msm_devfreq_resume(struct msm_gpu *gpu) @@ -243,127 +240,10 @@ void msm_devfreq_suspend(struct msm_gpu *gpu) if (!has_devfreq(gpu)) return; + devfreq_suspend_device(df->devfreq); + mutex_lock(&df->lock); df->suspended = true; mutex_unlock(&df->lock); - - devfreq_suspend_device(df->devfreq); - - cancel_idle_work(df); - cancel_boost_work(df); } -static void msm_devfreq_boost_work(struct kthread_work *work) -{ - struct msm_gpu_devfreq *df = container_of(work, - struct 
msm_gpu_devfreq, boost_work.work); - - dev_pm_qos_update_request(&df->boost_freq, 0); -} - -void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor) -{ - struct msm_gpu_devfreq *df = &gpu->devfreq; - uint64_t freq; - - if (!has_devfreq(gpu)) - return; - - freq = get_freq(gpu); - freq *= factor; - - /* - * A nice little trap is that PM QoS operates in terms of KHz, - * while devfreq operates in terms of Hz: - */ - do_div(freq, HZ_PER_KHZ); - - dev_pm_qos_update_request(&df->boost_freq, freq); - - msm_hrtimer_queue_work(&df->boost_work, - ms_to_ktime(msm_devfreq_profile.polling_ms), - HRTIMER_MODE_REL); -} - -void msm_devfreq_active(struct msm_gpu *gpu) -{ - struct msm_gpu_devfreq *df = &gpu->devfreq; - unsigned int idle_time; - unsigned long target_freq; - - if (!has_devfreq(gpu)) - return; - - /* - * Cancel any pending transition to idle frequency: - */ - cancel_idle_work(df); - - /* - * Hold devfreq lock to synchronize with get_dev_status()/ - * target() callbacks - */ - mutex_lock(&df->devfreq->lock); - - target_freq = df->idle_freq; - - idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time)); - - df->idle_freq = 0; - - /* - * We could have become active again before the idle work had a - * chance to run, in which case the df->idle_freq would have - * still been zero. In this case, no need to change freq. - */ - if (target_freq) - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); - - mutex_unlock(&df->devfreq->lock); - - /* - * If we've been idle for a significant fraction of a polling - * interval, then we won't meet the threshold of busyness for - * the governor to ramp up the freq.. 
so give some boost - */ - if (idle_time > msm_devfreq_profile.polling_ms) { - msm_devfreq_boost(gpu, 2); - } -} - - -static void msm_devfreq_idle_work(struct kthread_work *work) -{ - struct msm_gpu_devfreq *df = container_of(work, - struct msm_gpu_devfreq, idle_work.work); - struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); - struct msm_drm_private *priv = gpu->dev->dev_private; - unsigned long idle_freq, target_freq = 0; - - /* - * Hold devfreq lock to synchronize with get_dev_status()/ - * target() callbacks - */ - mutex_lock(&df->devfreq->lock); - - idle_freq = get_freq(gpu); - - if (priv->gpu_clamp_to_idle) - msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); - - df->idle_time = ktime_get(); - df->idle_freq = idle_freq; - - mutex_unlock(&df->devfreq->lock); -} - -void msm_devfreq_idle(struct msm_gpu *gpu) -{ - struct msm_gpu_devfreq *df = &gpu->devfreq; - - if (!has_devfreq(gpu)) - return; - - msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), - HRTIMER_MODE_REL); -} diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 919cc53bc02e35..6db24ca86a12cf 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -920,7 +920,7 @@ _opp_config_clk_single(struct device *dev, struct opp_table *opp_table, ret = clk_set_rate(opp_table->clk, freq); if (ret) { - dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, + dev_err(dev, "%s: failed to set clock rate to %lu: %d\n", __func__, freq, ret); } else { opp_table->rate_clk_single = freq; @@ -1010,6 +1010,8 @@ static int _set_opp_bw(const struct opp_table *opp_table, avg = opp->bandwidth[i].avg; peak = opp->bandwidth[i].peak; } + // without interconnect this is a nop + dev_dbg(dev, "set icc_set_bw to %u / %u\n", avg, peak); ret = icc_set_bw(opp_table->paths[i], avg, peak); if (ret) { dev_err(dev, "Failed to %s bandwidth[%d]: %d\n",