// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_config_defaults.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_gator.h>
#include <mali_kbase_mem_linux.h>
#ifdef CONFIG_MALI_DEVFREQ
#include <linux/devfreq.h>
#include <backend/gpu/mali_kbase_devfreq.h>
#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
#include <ipa/mali_kbase_ipa_debugfs.h>
#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* CONFIG_MALI_DEVFREQ */
#include "backend/gpu/mali_kbase_model_linux.h"
#include "uapi/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_mem.h"
#include "mali_kbase_mem_pool_debugfs.h"
#include "mali_kbase_mem_pool_group.h"
#include "mali_kbase_debugfs_helper.h"
#include "mali_kbase_regs_history_debugfs.h"
#include <mali_kbase_hwaccess_backend.h>
#include <mali_kbase_hwaccess_time.h>
#if !MALI_USE_CSF
#include <mali_kbase_hwaccess_jm.h>
#endif /* !MALI_USE_CSF */
#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
#include <mali_kbase_hwaccess_instr.h>
#endif
#include <mali_kbase_reset_gpu.h>
#include <uapi/gpu/arm/midgard/mali_kbase_ioctl.h>
#if !MALI_USE_CSF
#include "mali_kbase_kinstr_jm.h"
#endif
#include "hwcnt/mali_kbase_hwcnt_context.h"
#include "hwcnt/mali_kbase_hwcnt_virtualizer.h"
#include "mali_kbase_kinstr_prfcnt.h"
#include "mali_kbase_vinstr.h"
#if MALI_USE_CSF
#include "csf/mali_kbase_csf_firmware.h"
#include "csf/mali_kbase_csf_tiler_heap.h"
#include "csf/mali_kbase_csf_csg_debugfs.h"
#include "csf/mali_kbase_csf_cpu_queue_debugfs.h"
#include "csf/mali_kbase_csf_event.h"
#endif
#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include "arbiter/mali_kbase_arbiter_pm.h"
#endif

#include "mali_kbase_cs_experimental.h"

#ifdef CONFIG_MALI_CINSTR_GWT
#include "mali_kbase_gwt.h"
#endif
#include "backend/gpu/mali_kbase_pm_internal.h"
#include "mali_kbase_dvfs_debugfs.h"
#if IS_ENABLED(CONFIG_DEBUG_FS)
#include "mali_kbase_pbha_debugfs.h"
#endif

/* Pixel includes */
#include "platform/pixel/pixel_gpu_slc.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/compat.h>	/* is_compat_task/in_compat_syscall */
#include <linux/mman.h>
#include <linux/version.h>
#include <linux/version_compat_defs.h>
#include <mali_kbase_hw.h>
#if IS_ENABLED(CONFIG_SYNC_FILE)
#include <mali_kbase_sync.h>
#endif /* CONFIG_SYNC_FILE */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/log2.h>

#include <mali_kbase_config.h>

#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>

#include <tl/mali_kbase_timeline.h>

#include <mali_kbase_as_fault_debugfs.h>
#include <device/mali_kbase_device.h>
#include <context/mali_kbase_context.h>

#include <mali_kbase_caps.h>

#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"

/**
 * KBASE_API_VERSION - KBase API Version
 * @major: Kernel major version
 * @minor: Kernel minor version
 */
#define KBASE_API_VERSION(major, minor) ((((major) & 0xFFF) << 20) | \
					 (((minor) & 0xFFF) << 8) | \
					 ((0 & 0xFF) << 0))

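/*
 * Worked example of the encoding above (illustrative note, not part of the
 * original sources): major and minor each occupy 12 bits and the low 8 bits
 * are always zero, so
 *
 *   KBASE_API_VERSION(11, 15) == (11 << 20) | (15 << 8) == 0x00B00F00
 *
 * which is why encoded versions can be compared with plain integer
 * comparisons, as mali_kbase_supports_cap() does below.
 */
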
/**
 * struct mali_kbase_capability_def - kbase capabilities table
 *
 * @required_major: required major
 * @required_minor: required minor
 */
struct mali_kbase_capability_def {
	u16 required_major;
	u16 required_minor;
};

/*
 * This must be kept in-sync with mali_kbase_cap
 *
 * TODO: The alternative approach would be to embed the cap enum values
 * in the table. Less efficient but potentially safer.
 */
static const struct mali_kbase_capability_def kbase_caps_table[MALI_KBASE_NUM_CAPS] = {
#if MALI_USE_CSF
	{ 1, 0 },   /* SYSTEM_MONITOR */
	{ 1, 0 },   /* JIT_PRESSURE_LIMIT */
	{ 1, 0 },   /* MEM_GROW_ON_GPF */
	{ 1, 0 }    /* MEM_PROTECTED */
#else
	{ 11, 15 }, /* SYSTEM_MONITOR */
	{ 11, 25 }, /* JIT_PRESSURE_LIMIT */
	{ 11, 2 },  /* MEM_GROW_ON_GPF */
	{ 11, 2 }   /* MEM_PROTECTED */
#endif
};

#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
/* Mutex to synchronize the probe of multiple kbase instances */
static struct mutex kbase_probe_mutex;
#endif

static void kbase_file_destroy_kctx_worker(struct work_struct *work);

/**
 * mali_kbase_supports_cap - Query whether a kbase capability is supported
 *
 * @api_version: API version to convert
 * @cap: Capability to query for - see mali_kbase_caps.h
 *
 * Return: true if the capability is supported
 */
bool mali_kbase_supports_cap(unsigned long api_version, enum mali_kbase_cap cap)
{
	bool supported = false;
	unsigned long required_ver;

	struct mali_kbase_capability_def const *cap_def;

	if (WARN_ON(cap < 0))
		return false;

	if (WARN_ON(cap >= MALI_KBASE_NUM_CAPS))
		return false;

	cap_def = &kbase_caps_table[(int)cap];
	required_ver = KBASE_API_VERSION(cap_def->required_major, cap_def->required_minor);
	supported = (api_version >= required_ver);

	return supported;
}

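/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * sources): a caller holding a client's encoded API version can gate
 * optional features on the table above, e.g.
 *
 *   if (mali_kbase_supports_cap(KBASE_API_VERSION(11, 25),
 *                               MALI_KBASE_CAP_JIT_PRESSURE_LIMIT))
 *           ...take the JIT pressure limit path...
 *
 * MALI_KBASE_CAP_JIT_PRESSURE_LIMIT is assumed here to be one of the
 * enum mali_kbase_cap values declared in mali_kbase_caps.h.
 */
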
static void kbase_set_sched_rt(struct kbase_device *kbdev, struct task_struct *task, char *thread_name)
{
	unsigned int i;
	static const struct sched_param param = {
		.sched_priority = KBASE_RT_THREAD_PRIO,
	};

	cpumask_t mask = { CPU_BITS_NONE };

	for (i = KBASE_RT_THREAD_CPUMASK_MIN; i <= KBASE_RT_THREAD_CPUMASK_MAX; i++)
		cpumask_set_cpu(i, &mask);
	kthread_bind_mask(task, &mask);

	wake_up_process(task);

	if (sched_setscheduler_nocheck(task, SCHED_FIFO, &param))
		dev_warn(kbdev->dev, "%s not set to RT prio", thread_name);
	else
		dev_dbg(kbdev->dev, "%s set to RT prio: %i",
			thread_name, param.sched_priority);
}

struct task_struct *kbase_kthread_run_rt(struct kbase_device *kbdev,
	int (*threadfn)(void *data), void *thread_param, const char namefmt[], ...)
{
	struct task_struct *task;
	va_list args;
	char name_buf[128];
	int len;

	/* Construct the thread name */
	va_start(args, namefmt);
	len = vsnprintf(name_buf, sizeof(name_buf), namefmt, args);
	va_end(args);
	if (len + 1 > sizeof(name_buf))
		dev_warn(kbdev->dev, "RT thread name truncated to %s", name_buf);

	task = kthread_create(threadfn, thread_param, name_buf);

	if (!IS_ERR(task))
		kbase_set_sched_rt(kbdev, task, name_buf);

	return task;
}

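/*
 * Illustrative usage sketch for kbase_kthread_run_rt() (added for clarity,
 * not part of the original sources); my_thread_fn and my_data are
 * hypothetical names. Callers are expected to treat the return value like
 * that of kthread_run():
 *
 *   struct task_struct *t;
 *
 *   t = kbase_kthread_run_rt(kbdev, my_thread_fn, my_data, "mali_%s", "worker");
 *   if (IS_ERR(t))
 *           return PTR_ERR(t);
 *
 * The thread is already woken and bound to the RT CPU mask by
 * kbase_set_sched_rt() before this function returns.
 */
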
int kbase_kthread_run_worker_rt(struct kbase_device *kbdev,
	struct kthread_worker *worker, const char namefmt[], ...)
{
	struct task_struct *task;
	va_list args;
	char name_buf[128];
	int len;

	/* Construct the thread name */
	va_start(args, namefmt);
	len = vsnprintf(name_buf, sizeof(name_buf), namefmt, args);
	va_end(args);
	if (len + 1 > sizeof(name_buf))
		dev_warn(kbdev->dev, "RT thread name truncated to %s", name_buf);

	kthread_init_worker(worker);

	task = kthread_create(kthread_worker_fn, worker, name_buf);

	if (!IS_ERR(task)) {
		worker->task = task;
		kbase_set_sched_rt(kbdev, task, name_buf);
		return 0;
	}

	return PTR_ERR(task);
}

void kbase_destroy_kworker_stack(struct kthread_worker *worker)
{
	struct task_struct *task;

	task = worker->task;
	if (WARN_ON(!task))
		return;

	kthread_flush_worker(worker);
	kthread_stop(task);
	WARN_ON(!list_empty(&worker->work_list));
}

/**
 * kbase_file_new - Create an object representing a device file
 *
 * @kbdev: An instance of the GPU platform device, allocated from the probe
 *         method of the driver.
 * @filp:  Pointer to the struct file corresponding to device file
 *         /dev/malixx instance, passed to the file's open method.
 *
 * In its initial state, the device file has no context (i.e. no GPU
 * address space) and no API version number. Both must be assigned before
 * kbase_file_get_kctx_if_setup_complete() can be used successfully.
 *
 * Return: Address of an object representing a simulated device file, or NULL
 *         on failure.
 *
 * Note: This function always gets called in Userspace context.
 */
static struct kbase_file *kbase_file_new(struct kbase_device *const kbdev,
	struct file *const filp)
{
	struct kbase_file *const kfile = kmalloc(sizeof(*kfile), GFP_KERNEL);

	if (kfile) {
		kfile->kbdev = kbdev;
		kfile->filp = filp;
		kfile->kctx = NULL;
		kfile->api_version = 0;
		atomic_set(&kfile->setup_state, KBASE_FILE_NEED_VSN);
		/* Store the pointer to the file table structure of current process. */
		kfile->owner = current->files;
		INIT_WORK(&kfile->destroy_kctx_work, kbase_file_destroy_kctx_worker);
		spin_lock_init(&kfile->lock);
		kfile->fops_count = 0;
		kfile->map_count = 0;
		typecheck(typeof(kfile->map_count), typeof(current->mm->map_count));
#if IS_ENABLED(CONFIG_DEBUG_FS)
		init_waitqueue_head(&kfile->zero_fops_count_wait);
#endif
		init_waitqueue_head(&kfile->event_queue);
	}
	return kfile;
}

/**
 * kbase_file_set_api_version - Set the application programmer interface version
 *
 * @kfile: A device file created by kbase_file_new()
 * @major: Major version number (must not exceed 12 bits)
 * @minor: Minor version number (must not exceed 12 bits)
 *
 * An application programmer interface (API) version must be specified
 * before calling kbase_file_create_kctx(), otherwise an error is returned.
 *
 * If a version number was already set for the given @kfile (or is in the
 * process of being set by another thread) then an error is returned.
 *
 * Return: 0 if successful, otherwise a negative error code.
 */
static int kbase_file_set_api_version(struct kbase_file *const kfile,
	u16 const major, u16 const minor)
{
	if (WARN_ON(!kfile))
		return -EINVAL;

	/* setup pending, try to signal that we'll do the setup,
	 * if setup was already in progress, err this call
	 */
	if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_VSN,
			KBASE_FILE_VSN_IN_PROGRESS) != KBASE_FILE_NEED_VSN)
		return -EPERM;

	/* save the proposed version number for later use */
	kfile->api_version = KBASE_API_VERSION(major, minor);

	atomic_set(&kfile->setup_state, KBASE_FILE_NEED_CTX);
	return 0;
}

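/*
 * Note on kfile::setup_state (summary added for clarity): a freshly opened
 * file starts in KBASE_FILE_NEED_VSN, moves through KBASE_FILE_VSN_IN_PROGRESS
 * to KBASE_FILE_NEED_CTX once the API version has been set, then through
 * KBASE_FILE_CTX_IN_PROGRESS to KBASE_FILE_COMPLETE once the context has been
 * created, and finally to KBASE_FILE_DESTROY_CTX when the context is torn
 * down. The atomic_cmpxchg() calls in this file rely on that ordering.
 */
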
/**
 * kbase_file_get_api_version - Get the application programmer interface version
 *
 * @kfile: A device file created by kbase_file_new()
 *
 * Return: The version number (encoded with KBASE_API_VERSION) or 0 if none has
 *         been set.
 */
static unsigned long kbase_file_get_api_version(struct kbase_file *const kfile)
{
	if (WARN_ON(!kfile))
		return 0;

	if (atomic_read(&kfile->setup_state) < KBASE_FILE_NEED_CTX)
		return 0;

	return kfile->api_version;
}

/**
 * kbase_file_create_kctx - Create a kernel base context
 *
 * @kfile: A device file created by kbase_file_new()
 * @flags: Flags to set, which can be any combination of
 *         BASEP_CONTEXT_CREATE_KERNEL_FLAGS.
 *
 * This creates a new context for the GPU platform device instance that was
 * specified when kbase_file_new() was called. Each context has its own GPU
 * address space. If a context was already created for the given @kfile (or is
 * in the process of being created for it by another thread) then an error is
 * returned.
 *
 * An API version number must have been set by kbase_file_set_api_version()
 * before calling this function, otherwise an error is returned.
 *
 * Return: 0 if a new context was created, otherwise a negative error code.
 */
static int kbase_file_create_kctx(struct kbase_file *kfile,
	base_context_create_flags flags);

/**
 * kbase_file_inc_fops_count_if_allowed - Increment the kfile::fops_count value if the file
 *                                        operation is allowed for the current process.
 *
 * @kfile: Pointer to the object representing the /dev/malixx device file instance.
 *
 * The function shall be called at the beginning of certain file operation methods
 * implemented for @kbase_fops, like ioctl, poll, read and mmap.
 *
 * kbase_file_dec_fops_count() shall be called if the increment was done.
 *
 * Return: true if the increment was done otherwise false.
 *
 * Note: This function shall always be called in Userspace context.
 */
static bool kbase_file_inc_fops_count_if_allowed(struct kbase_file *const kfile)
{
	/* Disallow file operations from the other process that shares the instance
	 * of /dev/malixx file i.e. 'kfile' or disallow file operations if parent
	 * process has closed the file instance.
	 */
	if (unlikely(kfile->owner != current->files))
		return false;

	return kbase_file_inc_fops_count_unless_closed(kfile);
}

/**
 * kbase_file_get_kctx_if_setup_complete - Get a kernel base context
 *                                         pointer from a device file
 *
 * @kfile: A device file created by kbase_file_new()
 *
 * This function returns NULL if no context has been created for the given @kfile.
 * This makes it safe to use in circumstances where the order of initialization
 * cannot be enforced, but only if the caller checks the return value.
 *
 * Return: Address of the kernel base context associated with the @kfile, or
 *         NULL if no context exists.
 *
 * Note: This function shall always be called in Userspace context.
 */
static struct kbase_context *kbase_file_get_kctx_if_setup_complete(
	struct kbase_file *const kfile)
{
	if (WARN_ON(!kfile) ||
	    atomic_read(&kfile->setup_state) != KBASE_FILE_COMPLETE ||
	    WARN_ON(!kfile->kctx))
		return NULL;

	return kfile->kctx;
}

/**
 * kbase_file_destroy_kctx - Destroy the Kbase context created for @kfile.
 *
 * @kfile: A device file created by kbase_file_new()
 */
static void kbase_file_destroy_kctx(struct kbase_file *const kfile)
{
	if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_COMPLETE,
			KBASE_FILE_DESTROY_CTX) != KBASE_FILE_COMPLETE)
		return;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	kbasep_mem_profile_debugfs_remove(kfile->kctx);
	kbase_context_debugfs_term(kfile->kctx);
#endif

	kbase_destroy_context(kfile->kctx);
	dev_dbg(kfile->kbdev->dev, "Deleted kbase context");
}

/**
 * kbase_file_destroy_kctx_worker - Work item to destroy the Kbase context.
 *
 * @work: Pointer to the kfile::destroy_kctx_work.
 *
 * The work item shall only be enqueued if the context termination could not
 * be done from @kbase_flush().
 */
static void kbase_file_destroy_kctx_worker(struct work_struct *work)
{
	struct kbase_file *kfile =
		container_of(work, struct kbase_file, destroy_kctx_work);

	WARN_ON_ONCE(kfile->owner);
	WARN_ON_ONCE(kfile->map_count);
	WARN_ON_ONCE(kfile->fops_count);

	kbase_file_destroy_kctx(kfile);
}

 * kbase_file_destroy_kctx_on_flush - Try to destroy the Kbase context from the flush()
 *                                    method of @kbase_fops.
 *
 * @kfile: A device file created by kbase_file_new()
 */
static void kbase_file_destroy_kctx_on_flush(struct kbase_file *const kfile)
{
	bool can_destroy_context = false;

	spin_lock(&kfile->lock);
	kfile->owner = NULL;
	/* To destroy the context from flush() method, unlike the release()
	 * method, need to synchronize manually against the other threads in
	 * the current process that could be operating on the /dev/malixx file.
	 *
	 * Only destroy the context if all the memory mappings on the
	 * /dev/malixx file instance have been closed. If there are mappings
	 * present then the context would be destroyed later when the last
	 * mapping is closed.
	 * Also, only destroy the context if no file operations are in progress.
	 */
	can_destroy_context = !kfile->map_count && !kfile->fops_count;
	spin_unlock(&kfile->lock);

	if (likely(can_destroy_context)) {
		WARN_ON_ONCE(work_pending(&kfile->destroy_kctx_work));
		kbase_file_destroy_kctx(kfile);
	}
}

/**
 * kbase_file_delete - Destroy an object representing a device file
 *
 * @kfile: A device file created by kbase_file_new()
 *
 * If any context was created for the @kfile and is still alive, then it is destroyed.
 */
static void kbase_file_delete(struct kbase_file *const kfile)
{
	if (WARN_ON(!kfile))
		return;

	/* All the CPU mappings on the device file should have been closed */
	WARN_ON_ONCE(kfile->map_count);
#if IS_ENABLED(CONFIG_DEBUG_FS)
	/* There could still be file operations due to the debugfs file (mem_view) */
	wait_event(kfile->zero_fops_count_wait, !kbase_file_fops_count(kfile));
#else
	/* There shall not be any file operations in progress on the device file */
	WARN_ON_ONCE(kfile->fops_count);
#endif

	kfile->filp->private_data = NULL;
	cancel_work_sync(&kfile->destroy_kctx_work);
	/* Destroy the context if it wasn't done earlier from the flush() method. */
	kbase_file_destroy_kctx(kfile);
	kbase_release_device(kfile->kbdev);
	kfree(kfile);
}

static int kbase_api_handshake(struct kbase_file *kfile,
	struct kbase_ioctl_version_check *version)
{
	int err = 0;

	switch (version->major) {
	case BASE_UK_VERSION_MAJOR:
		/* set minor to be the lowest common */
		version->minor = min_t(int, BASE_UK_VERSION_MINOR,
				       (int)version->minor);
		break;
	default:
		/* We return our actual version regardless if it
		 * matches the version returned by userspace -
		 * userspace can bail if it can't handle this
		 * version
		 */
		version->major = BASE_UK_VERSION_MAJOR;
		version->minor = BASE_UK_VERSION_MINOR;
		break;
	}

	/* save the proposed version number for later use */
	err = kbase_file_set_api_version(kfile, version->major, version->minor);
	if (unlikely(err))
		return err;

	/* For backward compatibility, we may need to create the context before
	 * the flags have been set. Originally it was created on file open
	 * (with job submission disabled) but we don't support that usage.
	 */
	if (!mali_kbase_supports_system_monitor(kbase_file_get_api_version(kfile)))
		err = kbase_file_create_kctx(kfile,
			BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED);

	return err;
}

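/*
 * Illustrative user-space view of the handshake (added for clarity, not part
 * of the original sources, and assuming the KBASE_IOCTL_VERSION_CHECK and
 * KBASE_IOCTL_SET_FLAGS definitions from mali_kbase_ioctl.h):
 *
 *   struct kbase_ioctl_version_check vc = {
 *           .major = BASE_UK_VERSION_MAJOR, .minor = BASE_UK_VERSION_MINOR,
 *   };
 *   int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 *
 *   ioctl(fd, KBASE_IOCTL_VERSION_CHECK, &vc);   // kbase_api_handshake()
 *   ioctl(fd, KBASE_IOCTL_SET_FLAGS, &flags);    // kbase_api_set_flags()
 *
 * Only after both calls succeed does the kbase_file reach the
 * KBASE_FILE_COMPLETE state and become able to service the remaining ioctls.
 */
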
static int kbase_api_handshake_dummy(struct kbase_file *kfile,
	struct kbase_ioctl_version_check *version)
{
	return -EPERM;
}

static int kbase_api_kinstr_prfcnt_enum_info(
	struct kbase_file *kfile,
	struct kbase_ioctl_kinstr_prfcnt_enum_info *prfcnt_enum_info)
{
	return kbase_kinstr_prfcnt_enum_info(kfile->kbdev->kinstr_prfcnt_ctx,
					     prfcnt_enum_info);
}

static int kbase_api_kinstr_prfcnt_setup(
	struct kbase_file *kfile,
	union kbase_ioctl_kinstr_prfcnt_setup *prfcnt_setup)
{
	return kbase_kinstr_prfcnt_setup(kfile->kbdev->kinstr_prfcnt_ctx,
					 prfcnt_setup);
}

static struct kbase_device *to_kbase_device(struct device *dev)
{
	return dev_get_drvdata(dev);
}

int assign_irqs(struct kbase_device *kbdev)
{
	static const char *const irq_names_caps[] = { "JOB", "MMU", "GPU" };

#if IS_ENABLED(CONFIG_OF)
	static const char *const irq_names[] = { "job", "mmu", "gpu" };
#endif

	struct platform_device *pdev;
	int i;

	if (!kbdev)
		return -ENODEV;

	pdev = to_platform_device(kbdev->dev);

	for (i = 0; i < ARRAY_SIZE(irq_names_caps); i++) {
		int irq;

#if IS_ENABLED(CONFIG_OF)
		/* We recommend using Upper case for the irq names in dts, but if
		 * there are devices in the world using Lower case then we should
		 * avoid breaking support for them. So try using names in Upper case
		 * first then try using Lower case names. If both attempts fail then
		 * we assume there is no IRQ resource specified for the GPU.
		 */
		irq = platform_get_irq_byname(pdev, irq_names_caps[i]);
		if (irq < 0)
			irq = platform_get_irq_byname(pdev, irq_names[i]);
#else
		irq = platform_get_irq(pdev, i);
#endif /* CONFIG_OF */

		if (irq < 0) {
			dev_err(kbdev->dev, "No IRQ resource '%s'\n", irq_names_caps[i]);
			return irq;
		}

		kbdev->irqs[i].irq = irq;
		kbdev->irqs[i].flags = irqd_get_trigger_type(irq_get_irq_data(irq));
	}

	return 0;
}

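/*
 * Illustrative device-tree fragment matching the lookup above (added for
 * clarity, not part of the original sources); the upper-case names are the
 * recommended form, the lower-case ones are accepted for compatibility:
 *
 *   gpu@... {
 *           interrupts = <...>, <...>, <...>;
 *           interrupt-names = "JOB", "MMU", "GPU";
 *   };
 */
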
/* Find a particular kbase device (as specified by minor number), or find the "first" device if -1 is specified */
struct kbase_device *kbase_find_device(int minor)
{
	struct kbase_device *kbdev = NULL;
	struct list_head *entry;
	const struct list_head *dev_list = kbase_device_get_list();

	list_for_each(entry, dev_list) {
		struct kbase_device *tmp;

		tmp = list_entry(entry, struct kbase_device, entry);
		if (tmp->mdev.minor == minor || minor == -1) {
			kbdev = tmp;
			get_device(kbdev->dev);
			break;
		}
	}
	kbase_device_put_list(dev_list);

	return kbdev;
}
EXPORT_SYMBOL(kbase_find_device);

void kbase_release_device(struct kbase_device *kbdev)
{
	put_device(kbdev->dev);
}
EXPORT_SYMBOL(kbase_release_device);

#if IS_ENABLED(CONFIG_DEBUG_FS)
static ssize_t write_ctx_infinite_cache(struct file *f, const char __user *ubuf, size_t size, loff_t *off)
{
	struct kbase_context *kctx = f->private_data;
	int err;
	bool value;

	err = kstrtobool_from_user(ubuf, size, &value);
	if (err)
		return err;

	if (value)
		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
	else
		kbase_ctx_flag_clear(kctx, KCTX_INFINITE_CACHE);

	return size;
}

static ssize_t read_ctx_infinite_cache(struct file *f, char __user *ubuf, size_t size, loff_t *off)
{
	struct kbase_context *kctx = f->private_data;
	char buf[32];
	int count;
	bool value;

	value = kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE);

	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");

	return simple_read_from_buffer(ubuf, size, off, buf, count);
}

static const struct file_operations kbase_infinite_cache_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = write_ctx_infinite_cache,
	.read = read_ctx_infinite_cache,
};

static ssize_t write_ctx_force_same_va(struct file *f, const char __user *ubuf,
		size_t size, loff_t *off)
{
	struct kbase_context *kctx = f->private_data;
	int err;
	bool value;

	err = kstrtobool_from_user(ubuf, size, &value);
	if (err)
		return err;

	if (value) {
#if defined(CONFIG_64BIT)
		/* 32-bit clients cannot force SAME_VA */
		if (kbase_ctx_flag(kctx, KCTX_COMPAT))
			return -EINVAL;
		kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
#else /* defined(CONFIG_64BIT) */
		/* 32-bit clients cannot force SAME_VA */
		return -EINVAL;
#endif /* defined(CONFIG_64BIT) */
	} else {
		kbase_ctx_flag_clear(kctx, KCTX_FORCE_SAME_VA);
	}

	return size;
}

static ssize_t read_ctx_force_same_va(struct file *f, char __user *ubuf,
		size_t size, loff_t *off)
{
	struct kbase_context *kctx = f->private_data;
	char buf[32];
	int count;
	bool value;

	value = kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA);

	count = scnprintf(buf, sizeof(buf), "%s\n", value ? "Y" : "N");

	return simple_read_from_buffer(ubuf, size, off, buf, count);
}

static const struct file_operations kbase_force_same_va_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = write_ctx_force_same_va,
	.read = read_ctx_force_same_va,
};
#endif /* CONFIG_DEBUG_FS */

static int kbase_file_create_kctx(struct kbase_file *const kfile,
	base_context_create_flags const flags)
{
	struct kbase_device *kbdev = NULL;
	struct kbase_context *kctx = NULL;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	char kctx_name[64];
#endif

	if (WARN_ON(!kfile))
		return -EINVAL;

	/* setup pending, try to signal that we'll do the setup,
	 * if setup was already in progress, err this call
	 */
	if (atomic_cmpxchg(&kfile->setup_state, KBASE_FILE_NEED_CTX,
			KBASE_FILE_CTX_IN_PROGRESS) != KBASE_FILE_NEED_CTX)
		return -EPERM;

	kbdev = kfile->kbdev;

	kctx = kbase_create_context(kbdev, in_compat_syscall(),
		flags, kfile->api_version, kfile);

	/* if bad flags, will stay stuck in setup mode */
	if (!kctx)
		return -ENOMEM;

	if (kbdev->infinite_cache_active_default)
		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	if (unlikely(!scnprintf(kctx_name, 64, "%d_%d", kctx->tgid, kctx->id)))
		return -ENOMEM;

	mutex_init(&kctx->mem_profile_lock);

	kctx->kctx_dentry = debugfs_create_dir(kctx_name,
			kbdev->debugfs_ctx_directory);

	if (IS_ERR_OR_NULL(kctx->kctx_dentry)) {
		/* we don't treat this as a fail - just warn about it */
		dev_warn(kbdev->dev, "couldn't create debugfs dir for kctx\n");
	} else {
		debugfs_create_file("infinite_cache", 0644, kctx->kctx_dentry,
				kctx, &kbase_infinite_cache_fops);
		debugfs_create_file("force_same_va", 0600, kctx->kctx_dentry,
				kctx, &kbase_force_same_va_fops);

		kbase_context_debugfs_init(kctx);
	}
#endif /* CONFIG_DEBUG_FS */

	dev_dbg(kbdev->dev, "created base context\n");

	kfile->kctx = kctx;
	atomic_set(&kfile->setup_state, KBASE_FILE_COMPLETE);

	return 0;
}

static int kbase_open(struct inode *inode, struct file *filp)
{
	struct kbase_device *kbdev = NULL;
	struct kbase_file *kfile;
	int ret = 0;

	kbdev = kbase_find_device(iminor(inode));

	if (!kbdev)
		return -ENODEV;

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
	/* Set address space operations for page migration */
	kbase_mem_migrate_set_address_space_ops(kbdev, filp);
#endif

	/* Device-wide firmware load is moved here from probing to comply with
	 * Android GKI vendor guideline.
	 */
	ret = kbase_device_firmware_init_once(kbdev);
	if (ret)
		goto out;

	kfile = kbase_file_new(kbdev, filp);
	if (!kfile) {
		ret = -ENOMEM;
		goto out;
	}

	filp->private_data = kfile;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	return 0;

out:
	kbase_release_device(kbdev);
	return ret;
}

static int kbase_release(struct inode *inode, struct file *filp)
{
	struct kbase_file *const kfile = filp->private_data;

	kbase_file_delete(kfile);
	return 0;
}

/**
 * kbase_flush - Function implementing the flush() method of @kbase_fops.
 *
 * @filp: Pointer to the /dev/malixx device file instance.
 * @id:   Pointer to the file table structure of current process.
 *        If @filp is being shared by multiple processes then @id can differ
 *        from kfile::owner.
 *
 * This function is called every time the copy of @filp is closed. So if 3 processes
 * are sharing the @filp then this function would be called 3 times and only after
 * that kbase_release() would get called.
 *
 * Return: 0 if successful, otherwise a negative error code.
 *
 * Note: This function always gets called in Userspace context when the
 *       file is closed.
 */
static int kbase_flush(struct file *filp, fl_owner_t id)
{
	struct kbase_file *const kfile = filp->private_data;

	/* Try to destroy the context if the flush() method has been called for the
	 * process that created the instance of /dev/malixx file i.e. 'kfile'.
	 */
	if (kfile->owner == id)
		kbase_file_destroy_kctx_on_flush(kfile);

	return 0;
}

static int kbase_api_set_flags(struct kbase_file *kfile,
	struct kbase_ioctl_set_flags *flags)
{
	int err = 0;
	unsigned long const api_version = kbase_file_get_api_version(kfile);
	struct kbase_context *kctx = NULL;

	/* Validate flags */
	if (flags->create_flags !=
			(flags->create_flags & BASEP_CONTEXT_CREATE_KERNEL_FLAGS))
		return -EINVAL;

	/* For backward compatibility, the context may have been created before
	 * the flags were set.
	 */
	if (mali_kbase_supports_system_monitor(api_version)) {
		err = kbase_file_create_kctx(kfile, flags->create_flags);
	} else {
#if !MALI_USE_CSF
		struct kbasep_js_kctx_info *js_kctx_info = NULL;
		unsigned long irq_flags = 0;
#endif

		/* If setup is incomplete (e.g. because the API version
		 * wasn't set) then we have to give up.
		 */
		kctx = kbase_file_get_kctx_if_setup_complete(kfile);
		if (unlikely(!kctx))
			return -EPERM;

#if MALI_USE_CSF
		/* On CSF GPUs Job Manager interface isn't used to submit jobs
		 * (there are no job slots). So the legacy job manager path to
		 * submit jobs needs to remain disabled for CSF GPUs.
		 */
#else
		js_kctx_info = &kctx->jctx.sched_info;
		rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
		spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
		/* Translate the flags */
		if ((flags->create_flags &
				BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) == 0)
			kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);

		spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
#endif
	}

	return err;
}

#if !MALI_USE_CSF
static int kbase_api_apc_request(struct kbase_file *kfile,
	struct kbase_ioctl_apc_request *apc)
{
	kbase_pm_apc_request(kfile->kbdev, apc->dur_usec);
	return 0;
}
#endif

static int kbase_api_buffer_liveness_update(struct kbase_context *kctx,
	struct kbase_ioctl_buffer_liveness_update *update)
{
	/* Defer handling to platform */
	return gpu_pixel_handle_buffer_liveness_update_ioctl(kctx, update);
}

#if !MALI_USE_CSF
static int kbase_api_job_submit(struct kbase_context *kctx,
	struct kbase_ioctl_job_submit *submit)
{
	return kbase_jd_submit(kctx, u64_to_user_ptr(submit->addr),
			       submit->nr_atoms,
			       submit->stride, false);
}
#endif /* !MALI_USE_CSF */

static int kbase_api_get_gpuprops(struct kbase_file *kfile,
	struct kbase_ioctl_get_gpuprops *get_props)
{
	struct kbase_gpu_props *kprops = &kfile->kbdev->gpu_props;
	int err;

	if (get_props->flags != 0) {
		dev_err(kfile->kbdev->dev, "Unsupported flags to get_gpuprops");
		return -EINVAL;
	}

	if (get_props->size == 0)
		return kprops->prop_buffer_size;
	if (get_props->size < kprops->prop_buffer_size)
		return -EINVAL;

	err = copy_to_user(u64_to_user_ptr(get_props->buffer),
			   kprops->prop_buffer,
			   kprops->prop_buffer_size);
	if (err)
		return -EFAULT;
	return kprops->prop_buffer_size;
}

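/*
 * Usage note (added for clarity): user space typically calls the gpuprops
 * ioctl above twice - first with size == 0 to learn the required buffer size
 * from the return value, then again with a buffer at least that large so the
 * encoded property dump can be copied out.
 */
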
Sidath Senanayake72f24572020-10-27 11:38:49 +00001034#if !MALI_USE_CSF
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001035static int kbase_api_post_term(struct kbase_context *kctx)
1036{
1037 kbase_event_close(kctx);
1038 return 0;
1039}
Sidath Senanayake72f24572020-10-27 11:38:49 +00001040#endif /* !MALI_USE_CSF */
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001041
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08001042#if MALI_USE_CSF
1043static int kbase_api_mem_alloc_ex(struct kbase_context *kctx,
1044 union kbase_ioctl_mem_alloc_ex *alloc_ex)
1045{
1046 struct kbase_va_region *reg;
1047 u64 flags = alloc_ex->in.flags;
1048 u64 gpu_va;
1049
1050 /* Calls to this function are inherently asynchronous, with respect to
1051 * MMU operations.
1052 */
1053 const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
1054
1055 bool gpu_executable = (flags & BASE_MEM_PROT_GPU_EX) && kbase_has_exec_va_zone(kctx);
1056 bool fixed_or_fixable = (flags & (BASE_MEM_FIXED | BASE_MEM_FIXABLE));
1057
1058 if (!kbase_mem_allow_alloc(kctx))
1059 return -EINVAL;
1060
1061 /* The driver counts the number of FIXABLE and FIXED allocations because
1062 * they're not supposed to happen at the same time. However, that is not
1063 * a security concern: nothing bad happens if the two types of allocations
1064 * are made at the same time. The only reason why the driver is guarding
1065 * against them is because there's no client use case that is supposed
1066 * to need both of them at the same time, and the driver wants to help
1067 * the user space catch some obvious mistake.
1068 *
1069 * The driver is able to switch from FIXABLE allocations to FIXED and
1070 * vice versa, if all the allocations of one kind are freed before trying
1071 * to create allocations of a different kind.
1072 */
1073 if ((flags & BASE_MEM_FIXED) && (atomic64_read(&kctx->num_fixable_allocs) > 0))
1074 return -EINVAL;
1075
1076 if ((flags & BASE_MEM_FIXABLE) && (atomic64_read(&kctx->num_fixed_allocs) > 0))
1077 return -EINVAL;
1078
1079 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
1080 return -ENOMEM;
1081
1082 /* The fixed_address parameter must be either a non-zero, page-aligned
1083 * value for FIXED allocations or zero for any other kind of allocation.
1084 */
1085 if (flags & BASE_MEM_FIXED) {
1086 u64 aligned_fixed_address = alloc_ex->in.fixed_address & PAGE_MASK;
1087
1088 if ((aligned_fixed_address == 0) ||
1089 (aligned_fixed_address != alloc_ex->in.fixed_address))
1090 return -EINVAL;
1091
1092 gpu_va = aligned_fixed_address;
1093 } else if (alloc_ex->in.fixed_address != 0) {
1094 return -EINVAL;
1095 }
1096
1097 /* For 64-bit clients, force SAME_VA up to 2^(47)-1.
1098 * For 32-bit clients, force SAME_VA up to 2^(32)-1.
1099 *
1100 * In both cases, the executable and fixed/fixable zones, and
1101 * the executable+fixed/fixable zone, are all above this range.
1102 */
1103 if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
1104 kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
1105 if (!gpu_executable && !fixed_or_fixable)
1106 flags |= BASE_MEM_SAME_VA;
1107 }
1108
1109 /* If CSF event memory allocation, need to force certain flags.
1110 * SAME_VA - GPU address needs to be used as a CPU address, explicit
1111 * mmap has to be avoided.
1112 * CACHED_CPU - Frequent access to the event memory by CPU.
1113 * COHERENT_SYSTEM - No explicit cache maintenance around the access
1114 * to event memory so need to leverage the coherency support.
1115 */
1116 if (flags & BASE_MEM_CSF_EVENT) {
1117 /* We cannot honor this request */
1118 if (gpu_executable || fixed_or_fixable)
1119 return -ENOMEM;
1120
1121 flags |= (BASE_MEM_SAME_VA |
1122 BASE_MEM_CACHED_CPU |
1123 BASE_MEM_COHERENT_SYSTEM);
1124 }
1125
1126 reg = kbase_mem_alloc(kctx, alloc_ex->in.va_pages, alloc_ex->in.commit_pages,
1127 alloc_ex->in.extension, &flags, &gpu_va, mmu_sync_info);
1128
1129 if (!reg)
1130 return -ENOMEM;
1131
1132 alloc_ex->out.flags = flags;
1133 alloc_ex->out.gpu_va = gpu_va;
1134
1135 return 0;
1136}
1137
1138static int kbase_api_mem_alloc(struct kbase_context *kctx, union kbase_ioctl_mem_alloc *alloc)
1139{
1140 int ret;
1141 union kbase_ioctl_mem_alloc_ex mem_alloc_ex = { { 0 } };
1142
1143 mem_alloc_ex.in.va_pages = alloc->in.va_pages;
1144 mem_alloc_ex.in.commit_pages = alloc->in.commit_pages;
1145 mem_alloc_ex.in.extension = alloc->in.extension;
1146 mem_alloc_ex.in.flags = alloc->in.flags;
1147 mem_alloc_ex.in.fixed_address = 0;
1148
1149 ret = kbase_api_mem_alloc_ex(kctx, &mem_alloc_ex);
1150
1151 alloc->out.flags = mem_alloc_ex.out.flags;
1152 alloc->out.gpu_va = mem_alloc_ex.out.gpu_va;
1153
1154 return ret;
1155}
1156#else
1157static int kbase_api_mem_alloc(struct kbase_context *kctx, union kbase_ioctl_mem_alloc *alloc)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001158{
1159 struct kbase_va_region *reg;
1160 u64 flags = alloc->in.flags;
1161 u64 gpu_va;
1162
Jesse Hall0c596dc2021-11-23 14:38:46 -08001163 /* Calls to this function are inherently asynchronous, with respect to
1164 * MMU operations.
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001165 */
Jesse Hall0c596dc2021-11-23 14:38:46 -08001166 const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
1167
1168 if (!kbase_mem_allow_alloc(kctx))
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001169 return -EINVAL;
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001170
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001171 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001172 return -ENOMEM;
1173
Sidath Senanayakea9704312018-12-06 09:09:59 +01001174 /* Force SAME_VA if a 64-bit client.
1175 * The only exception is GPU-executable memory if an EXEC_VA zone
1176 * has been initialized. In that case, GPU-executable memory may
1177 * or may not be SAME_VA.
1178 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08001179 if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) && kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
Sidath Senanayakea9704312018-12-06 09:09:59 +01001180 if (!(flags & BASE_MEM_PROT_GPU_EX) || !kbase_has_exec_va_zone(kctx))
1181 flags |= BASE_MEM_SAME_VA;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001182 }
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001183
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08001184 reg = kbase_mem_alloc(kctx, alloc->in.va_pages, alloc->in.commit_pages, alloc->in.extension,
1185 &flags, &gpu_va, mmu_sync_info);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001186
1187 if (!reg)
1188 return -ENOMEM;
1189
1190 alloc->out.flags = flags;
1191 alloc->out.gpu_va = gpu_va;
1192
1193 return 0;
1194}
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08001195#endif /* MALI_USE_CSF */
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001196
1197static int kbase_api_mem_query(struct kbase_context *kctx,
1198 union kbase_ioctl_mem_query *query)
1199{
1200 return kbase_mem_query(kctx, query->in.gpu_addr,
1201 query->in.query, &query->out.value);
1202}
1203
1204static int kbase_api_mem_free(struct kbase_context *kctx,
1205 struct kbase_ioctl_mem_free *free)
1206{
1207 return kbase_mem_free(kctx, free->gpu_addr);
1208}
1209
Sidath Senanayake72f24572020-10-27 11:38:49 +00001210#if !MALI_USE_CSF
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01001211static int kbase_api_kinstr_jm_fd(struct kbase_context *kctx,
1212 union kbase_kinstr_jm_fd *arg)
1213{
1214 return kbase_kinstr_jm_get_fd(kctx->kinstr_jm, arg);
1215}
Sidath Senanayake72f24572020-10-27 11:38:49 +00001216#endif
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01001217
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001218static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
1219 struct kbase_ioctl_hwcnt_reader_setup *setup)
1220{
Sidath Senanayakea9704312018-12-06 09:09:59 +01001221 return kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001222}
1223
Sidath Senanayake86966062019-08-23 15:40:27 +02001224static int kbase_api_get_cpu_gpu_timeinfo(struct kbase_context *kctx,
1225 union kbase_ioctl_get_cpu_gpu_timeinfo *timeinfo)
1226{
1227 u32 flags = timeinfo->in.request_flags;
Jack Divere19249e2022-11-07 12:13:47 +00001228 struct timespec64 ts = { 0 };
1229 u64 timestamp = 0;
1230 u64 cycle_cnt = 0;
Sidath Senanayake86966062019-08-23 15:40:27 +02001231
1232 kbase_pm_context_active(kctx->kbdev);
1233
1234 kbase_backend_get_gpu_time(kctx->kbdev,
1235 (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG) ? &cycle_cnt : NULL,
1236 (flags & BASE_TIMEINFO_TIMESTAMP_FLAG) ? &timestamp : NULL,
1237 (flags & BASE_TIMEINFO_MONOTONIC_FLAG) ? &ts : NULL);
1238
1239 if (flags & BASE_TIMEINFO_TIMESTAMP_FLAG)
1240 timeinfo->out.timestamp = timestamp;
1241
1242 if (flags & BASE_TIMEINFO_CYCLE_COUNTER_FLAG)
1243 timeinfo->out.cycle_counter = cycle_cnt;
1244
1245 if (flags & BASE_TIMEINFO_MONOTONIC_FLAG) {
1246 timeinfo->out.sec = ts.tv_sec;
1247 timeinfo->out.nsec = ts.tv_nsec;
1248 }
1249
1250 kbase_pm_context_idle(kctx->kbdev);
1251
1252 return 0;
1253}
1254
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08001255#if IS_ENABLED(CONFIG_MALI_NO_MALI)
1256static int kbase_api_hwcnt_set(struct kbase_context *kctx,
1257 struct kbase_ioctl_hwcnt_values *values)
1258{
Jack Divere19249e2022-11-07 12:13:47 +00001259 return gpu_model_set_dummy_prfcnt_user_sample(u64_to_user_ptr(values->data), values->size);
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08001260}
1261#endif /* CONFIG_MALI_NO_MALI */
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01001262
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001263static int kbase_api_disjoint_query(struct kbase_context *kctx,
1264 struct kbase_ioctl_disjoint_query *query)
1265{
1266 query->counter = kbase_disjoint_event_get(kctx->kbdev);
1267
1268 return 0;
1269}
1270
1271static int kbase_api_get_ddk_version(struct kbase_context *kctx,
1272 struct kbase_ioctl_get_ddk_version *version)
1273{
1274 int ret;
1275 int len = sizeof(KERNEL_SIDE_DDK_VERSION_STRING);
1276
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001277 if (version->version_buffer == 0)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001278 return len;
1279
1280 if (version->size < len)
1281 return -EOVERFLOW;
1282
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001283 ret = copy_to_user(u64_to_user_ptr(version->version_buffer),
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001284 KERNEL_SIDE_DDK_VERSION_STRING,
1285 sizeof(KERNEL_SIDE_DDK_VERSION_STRING));
1286
1287 if (ret)
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001288 return -EFAULT;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001289
1290 return len;
1291}
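
/*
 * Illustrative userspace sketch (not part of the driver, error handling
 * omitted): the handler above supports a two-step query, first with
 * version_buffer == 0 to learn the required length, then with a buffer of
 * at least that size:
 *
 *	struct kbase_ioctl_get_ddk_version v = { 0 };
 *	int len = ioctl(fd, KBASE_IOCTL_GET_DDK_VERSION, &v);
 *	char *buf = malloc(len);
 *
 *	v.version_buffer = (__u64)(uintptr_t)buf;
 *	v.size = len;
 *	ioctl(fd, KBASE_IOCTL_GET_DDK_VERSION, &v);
 *
 * The first call returns the version string length, the second copies the
 * string into buf.
 */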
1292
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02001293static int kbase_api_mem_jit_init(struct kbase_context *kctx,
1294 struct kbase_ioctl_mem_jit_init *jit_init)
1295{
1296 int i;
1297
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02001298 for (i = 0; i < sizeof(jit_init->padding); i++) {
1299 /* Ensure all padding bytes are 0 for potential future
1300 * extension
1301 */
1302 if (jit_init->padding[i])
1303 return -EINVAL;
1304 }
1305
1306 return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
1307 jit_init->max_allocations, jit_init->trim_level,
1308 jit_init->group_id, jit_init->phys_pages);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001309}
1310
Sidath Senanayakea9704312018-12-06 09:09:59 +01001311static int kbase_api_mem_exec_init(struct kbase_context *kctx,
1312 struct kbase_ioctl_mem_exec_init *exec_init)
1313{
1314 return kbase_region_tracker_init_exec(kctx, exec_init->va_pages);
1315}
1316
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001317static int kbase_api_mem_sync(struct kbase_context *kctx,
1318 struct kbase_ioctl_mem_sync *sync)
1319{
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001320 struct basep_syncset sset = {
1321 .mem_handle.basep.handle = sync->handle,
1322 .user_addr = sync->user_addr,
1323 .size = sync->size,
1324 .type = sync->type
1325 };
1326
1327 return kbase_sync_now(kctx, &sset);
1328}
1329
1330static int kbase_api_mem_find_cpu_offset(struct kbase_context *kctx,
1331 union kbase_ioctl_mem_find_cpu_offset *find)
1332{
1333 return kbasep_find_enclosing_cpu_mapping_offset(
1334 kctx,
1335 find->in.cpu_addr,
1336 find->in.size,
1337 &find->out.offset);
1338}
1339
Sidath Senanayakee42736e2018-01-22 13:55:38 +01001340static int kbase_api_mem_find_gpu_start_and_offset(struct kbase_context *kctx,
1341 union kbase_ioctl_mem_find_gpu_start_and_offset *find)
1342{
1343 return kbasep_find_enclosing_gpu_mapping_start_and_offset(
1344 kctx,
1345 find->in.gpu_addr,
1346 find->in.size,
1347 &find->out.start,
1348 &find->out.offset);
1349}
1350
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001351static int kbase_api_get_context_id(struct kbase_context *kctx,
1352 struct kbase_ioctl_get_context_id *info)
1353{
1354 info->id = kctx->id;
1355
1356 return 0;
1357}
1358
1359static int kbase_api_tlstream_acquire(struct kbase_context *kctx,
1360 struct kbase_ioctl_tlstream_acquire *acquire)
1361{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001362 return kbase_timeline_io_acquire(kctx->kbdev, acquire->flags);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001363}
1364
1365static int kbase_api_tlstream_flush(struct kbase_context *kctx)
1366{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001367 kbase_timeline_streams_flush(kctx->kbdev->timeline);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001368
1369 return 0;
1370}
1371
1372static int kbase_api_mem_commit(struct kbase_context *kctx,
1373 struct kbase_ioctl_mem_commit *commit)
1374{
1375 return kbase_mem_commit(kctx, commit->gpu_addr, commit->pages);
1376}
1377
1378static int kbase_api_mem_alias(struct kbase_context *kctx,
1379 union kbase_ioctl_mem_alias *alias)
1380{
1381 struct base_mem_aliasing_info *ai;
1382 u64 flags;
1383 int err;
1384
Sidath Senanayakefca86132021-06-15 13:39:30 +01001385 if (alias->in.nents == 0 || alias->in.nents > BASE_MEM_ALIAS_MAX_ENTS)
Sidath Senanayakee42736e2018-01-22 13:55:38 +01001386 return -EINVAL;
1387
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001388 ai = vmalloc(sizeof(*ai) * alias->in.nents);
1389 if (!ai)
1390 return -ENOMEM;
1391
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001392 err = copy_from_user(ai,
1393 u64_to_user_ptr(alias->in.aliasing_info),
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001394 sizeof(*ai) * alias->in.nents);
1395 if (err) {
1396 vfree(ai);
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001397 return -EFAULT;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001398 }
1399
1400 flags = alias->in.flags;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001401 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY) {
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001402 vfree(ai);
1403 return -EINVAL;
1404 }
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001405
1406 alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
1407 alias->in.stride, alias->in.nents,
1408 ai, &alias->out.va_pages);
1409
1410 alias->out.flags = flags;
1411
1412 vfree(ai);
1413
1414 if (alias->out.gpu_va == 0)
1415 return -ENOMEM;
1416
1417 return 0;
1418}
1419
1420static int kbase_api_mem_import(struct kbase_context *kctx,
1421 union kbase_ioctl_mem_import *import)
1422{
1423 int ret;
1424 u64 flags = import->in.flags;
1425
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001426 if (flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001427 return -ENOMEM;
1428
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001429 ret = kbase_mem_import(kctx,
1430 import->in.type,
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001431 u64_to_user_ptr(import->in.phandle),
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001432 import->in.padding,
1433 &import->out.gpu_va,
1434 &import->out.va_pages,
1435 &flags);
1436
1437 import->out.flags = flags;
1438
1439 return ret;
1440}
1441
1442static int kbase_api_mem_flags_change(struct kbase_context *kctx,
1443 struct kbase_ioctl_mem_flags_change *change)
1444{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001445 if (change->flags & BASEP_MEM_FLAGS_KERNEL_ONLY)
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001446 return -ENOMEM;
1447
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001448 return kbase_mem_flags_change(kctx, change->gpu_va,
1449 change->flags, change->mask);
1450}
1451
1452static int kbase_api_stream_create(struct kbase_context *kctx,
1453 struct kbase_ioctl_stream_create *stream)
1454{
Debarshi Dutta20fff722023-06-02 13:36:22 +00001455#if IS_ENABLED(CONFIG_SYNC_FILE)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001456 int fd, ret;
1457
1458 /* Name must be NULL-terminated and padded with NULLs, so check last
1459 * character is NULL
1460 */
1461 if (stream->name[sizeof(stream->name)-1] != 0)
1462 return -EINVAL;
1463
1464 ret = kbase_sync_fence_stream_create(stream->name, &fd);
1465
1466 if (ret)
1467 return ret;
1468 return fd;
1469#else
1470 return -ENOENT;
1471#endif
1472}
1473
1474static int kbase_api_fence_validate(struct kbase_context *kctx,
1475 struct kbase_ioctl_fence_validate *validate)
1476{
Debarshi Dutta20fff722023-06-02 13:36:22 +00001477#if IS_ENABLED(CONFIG_SYNC_FILE)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001478 return kbase_sync_fence_validate(validate->fd);
1479#else
1480 return -ENOENT;
1481#endif
1482}
1483
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001484static int kbase_api_mem_profile_add(struct kbase_context *kctx,
1485 struct kbase_ioctl_mem_profile_add *data)
1486{
1487 char *buf;
1488 int err;
1489
1490 if (data->len > KBASE_MEM_PROFILE_MAX_BUF_SIZE) {
Debarshi Dutta20fff722023-06-02 13:36:22 +00001491 dev_err(kctx->kbdev->dev, "mem_profile_add: buffer too big");
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001492 return -EINVAL;
1493 }
1494
Debarshi Dutta20fff722023-06-02 13:36:22 +00001495 if (!data->len) {
1496 dev_err(kctx->kbdev->dev, "mem_profile_add: buffer size is 0");
1497 /* Should return -EINVAL, but returning -ENOMEM for backwards compat */
1498 return -ENOMEM;
1499 }
1500
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001501 buf = kmalloc(data->len, GFP_KERNEL);
Debarshi Dutta20fff722023-06-02 13:36:22 +00001502 if (!buf)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001503 return -ENOMEM;
1504
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001505 err = copy_from_user(buf, u64_to_user_ptr(data->buffer),
1506 data->len);
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001507 if (err) {
1508 kfree(buf);
Sidath Senanayakeea23e532017-07-11 16:57:40 +02001509 return -EFAULT;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001510 }
1511
1512 return kbasep_mem_profile_debugfs_insert(kctx, buf, data->len);
1513}
1514
Sidath Senanayake72f24572020-10-27 11:38:49 +00001515#if !MALI_USE_CSF
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001516static int kbase_api_soft_event_update(struct kbase_context *kctx,
1517 struct kbase_ioctl_soft_event_update *update)
1518{
1519 if (update->flags != 0)
1520 return -EINVAL;
1521
1522 return kbase_soft_event_update(kctx, update->event, update->new_status);
1523}
Sidath Senanayake72f24572020-10-27 11:38:49 +00001524#endif /* !MALI_USE_CSF */
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001525
Sidath Senanayakee42736e2018-01-22 13:55:38 +01001526static int kbase_api_sticky_resource_map(struct kbase_context *kctx,
1527 struct kbase_ioctl_sticky_resource_map *map)
1528{
1529 int ret;
1530 u64 i;
1531 u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1532
1533 if (!map->count || map->count > BASE_EXT_RES_COUNT_MAX)
1534 return -EOVERFLOW;
1535
1536 ret = copy_from_user(gpu_addr, u64_to_user_ptr(map->address),
1537 sizeof(u64) * map->count);
1538
1539 if (ret != 0)
1540 return -EFAULT;
1541
1542 kbase_gpu_vm_lock(kctx);
1543
1544 for (i = 0; i < map->count; i++) {
1545 if (!kbase_sticky_resource_acquire(kctx, gpu_addr[i])) {
1546 /* Invalid resource */
1547 ret = -EINVAL;
1548 break;
1549 }
1550 }
1551
1552 if (ret != 0) {
1553 while (i > 0) {
1554 i--;
Sidath Senanayake1f3b3ea2019-11-11 11:49:32 +01001555 kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i]);
Sidath Senanayakee42736e2018-01-22 13:55:38 +01001556 }
1557 }
1558
1559 kbase_gpu_vm_unlock(kctx);
1560
1561 return ret;
1562}
1563
1564static int kbase_api_sticky_resource_unmap(struct kbase_context *kctx,
1565 struct kbase_ioctl_sticky_resource_unmap *unmap)
1566{
1567 int ret;
1568 u64 i;
1569 u64 gpu_addr[BASE_EXT_RES_COUNT_MAX];
1570
1571 if (!unmap->count || unmap->count > BASE_EXT_RES_COUNT_MAX)
1572 return -EOVERFLOW;
1573
1574 ret = copy_from_user(gpu_addr, u64_to_user_ptr(unmap->address),
1575 sizeof(u64) * unmap->count);
1576
1577 if (ret != 0)
1578 return -EFAULT;
1579
1580 kbase_gpu_vm_lock(kctx);
1581
1582 for (i = 0; i < unmap->count; i++) {
Sidath Senanayake1f3b3ea2019-11-11 11:49:32 +01001583 if (!kbase_sticky_resource_release_force(kctx, NULL, gpu_addr[i])) {
Sidath Senanayakee42736e2018-01-22 13:55:38 +01001584 /* Invalid resource, but we keep going anyway */
1585 ret = -EINVAL;
1586 }
1587 }
1588
1589 kbase_gpu_vm_unlock(kctx);
1590
1591 return ret;
1592}
1593
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001594#if MALI_UNIT_TEST
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001595
1596static int kbase_api_tlstream_stats(struct kbase_context *kctx,
1597 struct kbase_ioctl_tlstream_stats *stats)
1598{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001599 kbase_timeline_stats(kctx->kbdev->timeline,
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001600 &stats->bytes_collected,
1601 &stats->bytes_generated);
1602
1603 return 0;
1604}
1605#endif /* MALI_UNIT_TEST */
1606
Sidath Senanayake72f24572020-10-27 11:38:49 +00001607#if MALI_USE_CSF
1608static int kbasep_cs_event_signal(struct kbase_context *kctx)
1609{
1610 kbase_csf_event_signal_notify_gpu(kctx);
1611 return 0;
1612}
1613
1614static int kbasep_cs_queue_register(struct kbase_context *kctx,
1615 struct kbase_ioctl_cs_queue_register *reg)
1616{
1617 kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
1618
1619 return kbase_csf_queue_register(kctx, reg);
1620}
1621
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01001622static int kbasep_cs_queue_register_ex(struct kbase_context *kctx,
1623 struct kbase_ioctl_cs_queue_register_ex *reg)
1624{
1625 kctx->jit_group_id = BASE_MEM_GROUP_DEFAULT;
1626
1627 return kbase_csf_queue_register_ex(kctx, reg);
1628}
1629
Sidath Senanayake72f24572020-10-27 11:38:49 +00001630static int kbasep_cs_queue_terminate(struct kbase_context *kctx,
1631 struct kbase_ioctl_cs_queue_terminate *term)
1632{
1633 kbase_csf_queue_terminate(kctx, term);
1634
1635 return 0;
1636}
1637
1638static int kbasep_cs_queue_bind(struct kbase_context *kctx,
1639 union kbase_ioctl_cs_queue_bind *bind)
1640{
1641 return kbase_csf_queue_bind(kctx, bind);
1642}
1643
1644static int kbasep_cs_queue_kick(struct kbase_context *kctx,
1645 struct kbase_ioctl_cs_queue_kick *kick)
1646{
1647 return kbase_csf_queue_kick(kctx, kick);
1648}
1649
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08001650static int kbasep_cs_queue_group_create_1_6(
1651 struct kbase_context *kctx,
1652 union kbase_ioctl_cs_queue_group_create_1_6 *create)
1653{
Jörg Wagnerdacf0042023-08-01 13:38:22 +00001654 int ret, i;
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08001655 union kbase_ioctl_cs_queue_group_create
1656 new_create = { .in = {
1657 .tiler_mask = create->in.tiler_mask,
1658 .fragment_mask =
1659 create->in.fragment_mask,
1660 .compute_mask = create->in.compute_mask,
1661 .cs_min = create->in.cs_min,
1662 .priority = create->in.priority,
1663 .tiler_max = create->in.tiler_max,
1664 .fragment_max = create->in.fragment_max,
1665 .compute_max = create->in.compute_max,
1666 } };
1667
Jörg Wagnerdacf0042023-08-01 13:38:22 +00001668 for (i = 0; i < ARRAY_SIZE(create->in.padding); i++) {
1669 if (create->in.padding[i] != 0) {
1670 dev_warn(kctx->kbdev->dev, "Invalid padding not 0 in queue group create\n");
1671 return -EINVAL;
1672 }
1673 }
1674
1675 ret = kbase_csf_queue_group_create(kctx, &new_create);
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08001676
1677 create->out.group_handle = new_create.out.group_handle;
1678 create->out.group_uid = new_create.out.group_uid;
1679
1680 return ret;
1681}
Jörg Wagnerdacf0042023-08-01 13:38:22 +00001682
1683static int kbasep_cs_queue_group_create_1_18(struct kbase_context *kctx,
1684 union kbase_ioctl_cs_queue_group_create_1_18 *create)
1685{
1686 int ret, i;
1687 union kbase_ioctl_cs_queue_group_create
1688 new_create = { .in = {
1689 .tiler_mask = create->in.tiler_mask,
1690 .fragment_mask = create->in.fragment_mask,
1691 .compute_mask = create->in.compute_mask,
1692 .cs_min = create->in.cs_min,
1693 .priority = create->in.priority,
1694 .tiler_max = create->in.tiler_max,
1695 .fragment_max = create->in.fragment_max,
1696 .compute_max = create->in.compute_max,
1697 .csi_handlers = create->in.csi_handlers,
1698 .dvs_buf = create->in.dvs_buf,
1699 } };
1700
1701 for (i = 0; i < ARRAY_SIZE(create->in.padding); i++) {
1702 if (create->in.padding[i] != 0) {
1703 dev_warn(kctx->kbdev->dev, "Invalid padding not 0 in queue group create\n");
1704 return -EINVAL;
1705 }
1706 }
1707
1708 ret = kbase_csf_queue_group_create(kctx, &new_create);
1709
1710 create->out.group_handle = new_create.out.group_handle;
1711 create->out.group_uid = new_create.out.group_uid;
1712
1713 return ret;
1714}
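
/*
 * Compatibility note (descriptive only): the _1_6 and _1_18 handlers above
 * repack the older ioctl layouts into the current
 * union kbase_ioctl_cs_queue_group_create before calling
 * kbase_csf_queue_group_create(); fields absent from the older layouts
 * (e.g. csi_handlers and dvs_buf in the 1.6 case) are left zero-initialised
 * by the designated initializer.
 */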
1715
Sidath Senanayake72f24572020-10-27 11:38:49 +00001716static int kbasep_cs_queue_group_create(struct kbase_context *kctx,
1717 union kbase_ioctl_cs_queue_group_create *create)
1718{
Jörg Wagnerdacf0042023-08-01 13:38:22 +00001719 if (create->in.reserved != 0) {
1720 dev_warn(kctx->kbdev->dev, "Invalid reserved field not 0 in queue group create\n");
1721 return -EINVAL;
1722 }
Sidath Senanayake72f24572020-10-27 11:38:49 +00001723 return kbase_csf_queue_group_create(kctx, create);
1724}
1725
1726static int kbasep_cs_queue_group_terminate(struct kbase_context *kctx,
1727 struct kbase_ioctl_cs_queue_group_term *term)
1728{
1729 kbase_csf_queue_group_terminate(kctx, term->group_handle);
1730
1731 return 0;
1732}
1733
1734static int kbasep_kcpu_queue_new(struct kbase_context *kctx,
1735 struct kbase_ioctl_kcpu_queue_new *new)
1736{
1737 return kbase_csf_kcpu_queue_new(kctx, new);
1738}
1739
1740static int kbasep_kcpu_queue_delete(struct kbase_context *kctx,
1741 struct kbase_ioctl_kcpu_queue_delete *delete)
1742{
1743 return kbase_csf_kcpu_queue_delete(kctx, delete);
1744}
1745
1746static int kbasep_kcpu_queue_enqueue(struct kbase_context *kctx,
1747 struct kbase_ioctl_kcpu_queue_enqueue *enqueue)
1748{
1749 return kbase_csf_kcpu_queue_enqueue(kctx, enqueue);
1750}
1751
1752static int kbasep_cs_tiler_heap_init(struct kbase_context *kctx,
1753 union kbase_ioctl_cs_tiler_heap_init *heap_init)
1754{
Kevin Parkf19a3fd2022-11-24 14:27:12 +00001755 if (heap_init->in.group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
1756 return -EINVAL;
1757
1758 kctx->jit_group_id = heap_init->in.group_id;
Sidath Senanayake72f24572020-10-27 11:38:49 +00001759
1760 return kbase_csf_tiler_heap_init(kctx, heap_init->in.chunk_size,
Jack Divere19249e2022-11-07 12:13:47 +00001761 heap_init->in.initial_chunks, heap_init->in.max_chunks,
1762 heap_init->in.target_in_flight, heap_init->in.buf_desc_va,
1763 &heap_init->out.gpu_heap_va,
1764 &heap_init->out.first_chunk_va);
1765}
1766
1767static int kbasep_cs_tiler_heap_init_1_13(struct kbase_context *kctx,
1768 union kbase_ioctl_cs_tiler_heap_init_1_13 *heap_init)
1769{
Debarshi Dutta20fff722023-06-02 13:36:22 +00001770 if (heap_init->in.group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
1771 return -EINVAL;
1772
Jack Divere19249e2022-11-07 12:13:47 +00001773 kctx->jit_group_id = heap_init->in.group_id;
1774
1775 return kbase_csf_tiler_heap_init(kctx, heap_init->in.chunk_size,
1776 heap_init->in.initial_chunks, heap_init->in.max_chunks,
1777 heap_init->in.target_in_flight, 0,
1778 &heap_init->out.gpu_heap_va,
1779 &heap_init->out.first_chunk_va);
Sidath Senanayake72f24572020-10-27 11:38:49 +00001780}
1781
1782static int kbasep_cs_tiler_heap_term(struct kbase_context *kctx,
1783 struct kbase_ioctl_cs_tiler_heap_term *heap_term)
1784{
1785 return kbase_csf_tiler_heap_term(kctx, heap_term->gpu_heap_va);
1786}
1787
1788static int kbase_ioctl_cs_get_glb_iface(struct kbase_context *kctx,
1789 union kbase_ioctl_cs_get_glb_iface *param)
1790{
1791 struct basep_cs_stream_control *stream_data = NULL;
1792 struct basep_cs_group_control *group_data = NULL;
1793 void __user *user_groups, *user_streams;
1794 int err = 0;
1795 u32 const max_group_num = param->in.max_group_num;
1796 u32 const max_total_stream_num = param->in.max_total_stream_num;
1797
1798 if (max_group_num > MAX_SUPPORTED_CSGS)
1799 return -EINVAL;
1800
1801 if (max_total_stream_num >
1802 MAX_SUPPORTED_CSGS * MAX_SUPPORTED_STREAMS_PER_GROUP)
1803 return -EINVAL;
1804
1805 user_groups = u64_to_user_ptr(param->in.groups_ptr);
1806 user_streams = u64_to_user_ptr(param->in.streams_ptr);
1807
1808 if (max_group_num > 0) {
1809 if (!user_groups)
1810 err = -EINVAL;
1811 else {
1812 group_data = kcalloc(max_group_num,
1813 sizeof(*group_data), GFP_KERNEL);
1814 if (!group_data)
1815 err = -ENOMEM;
1816 }
1817 }
1818
1819 if (max_total_stream_num > 0) {
1820 if (!user_streams)
1821 err = -EINVAL;
1822 else {
1823 stream_data = kcalloc(max_total_stream_num,
1824 sizeof(*stream_data), GFP_KERNEL);
1825 if (!stream_data)
1826 err = -ENOMEM;
1827 }
1828 }
1829
1830 if (!err) {
Sidath Senanayakefca86132021-06-15 13:39:30 +01001831 param->out.total_stream_num = kbase_csf_firmware_get_glb_iface(
1832 kctx->kbdev, group_data, max_group_num, stream_data,
1833 max_total_stream_num, &param->out.glb_version,
1834 &param->out.features, &param->out.group_num,
1835 &param->out.prfcnt_size, &param->out.instr_features);
Sidath Senanayake72f24572020-10-27 11:38:49 +00001836
1837 if (copy_to_user(user_groups, group_data,
1838 MIN(max_group_num, param->out.group_num) *
1839 sizeof(*group_data)))
1840 err = -EFAULT;
1841 }
1842
1843 if (!err)
1844 if (copy_to_user(user_streams, stream_data,
1845 MIN(max_total_stream_num, param->out.total_stream_num) *
1846 sizeof(*stream_data)))
1847 err = -EFAULT;
1848
1849 kfree(group_data);
1850 kfree(stream_data);
1851 return err;
1852}
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00001853
1854static int kbasep_ioctl_cs_cpu_queue_dump(struct kbase_context *kctx,
1855 struct kbase_ioctl_cs_cpu_queue_info *cpu_queue_info)
1856{
1857 return kbase_csf_cpu_queue_dump(kctx, cpu_queue_info->buffer,
1858 cpu_queue_info->size);
1859}
1860
Jack Divere19249e2022-11-07 12:13:47 +00001861static int kbase_ioctl_read_user_page(struct kbase_context *kctx,
1862 union kbase_ioctl_read_user_page *user_page)
1863{
1864 struct kbase_device *kbdev = kctx->kbdev;
1865 unsigned long flags;
1866
1867 /* As of now, only LATEST_FLUSH is supported */
1868 if (unlikely(user_page->in.offset != LATEST_FLUSH))
1869 return -EINVAL;
1870
1871 /* Validate the padding, which must be zero */
1872 if (unlikely(user_page->in.padding != 0))
1873 return -EINVAL;
1874
1875 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
1876 if (!kbdev->pm.backend.gpu_powered)
1877 user_page->out.val_lo = POWER_DOWN_LATEST_FLUSH_VALUE;
1878 else
1879 user_page->out.val_lo = kbase_reg_read(kbdev, USER_REG(LATEST_FLUSH));
1880 user_page->out.val_hi = 0;
1881 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
1882
1883 return 0;
1884}
Sidath Senanayake72f24572020-10-27 11:38:49 +00001885#endif /* MALI_USE_CSF */
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02001886
Sidath Senanayake97483052021-01-29 15:03:53 +00001887static int kbasep_ioctl_context_priority_check(struct kbase_context *kctx,
1888 struct kbase_ioctl_context_priority_check *priority_check)
1889{
1890#if MALI_USE_CSF
1891 priority_check->priority = kbase_csf_priority_check(kctx->kbdev, priority_check->priority);
1892#else
1893 base_jd_prio req_priority = (base_jd_prio)priority_check->priority;
1894
1895 priority_check->priority = (u8)kbase_js_priority_check(kctx->kbdev, req_priority);
1896#endif
1897 return 0;
1898}
1899
1900#define KBASE_HANDLE_IOCTL(cmd, function, arg) \
1901 do { \
1902 int ret; \
1903 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
1904 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1905 ret = function(arg); \
1906 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1907 #function); \
1908 return ret; \
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001909 } while (0)
1910
Sidath Senanayake97483052021-01-29 15:03:53 +00001911#define KBASE_HANDLE_IOCTL_IN(cmd, function, type, arg) \
1912 do { \
1913 type param; \
1914 int ret, err; \
1915 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1916 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_WRITE); \
1917 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1918 err = copy_from_user(&param, uarg, sizeof(param)); \
1919 if (err) \
1920 return -EFAULT; \
1921 ret = function(arg, &param); \
1922 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1923 #function); \
1924 return ret; \
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001925 } while (0)
1926
Sidath Senanayake97483052021-01-29 15:03:53 +00001927#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type, arg) \
1928 do { \
1929 type param; \
1930 int ret, err; \
1931 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1932 BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_READ); \
1933 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1934 memset(&param, 0, sizeof(param)); \
1935 ret = function(arg, &param); \
1936 err = copy_to_user(uarg, &param, sizeof(param)); \
1937 if (err) \
1938 return -EFAULT; \
1939 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1940 #function); \
1941 return ret; \
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001942 } while (0)
1943
Sidath Senanayake97483052021-01-29 15:03:53 +00001944#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type, arg) \
1945 do { \
1946 type param; \
1947 int ret, err; \
1948 dev_dbg(arg->kbdev->dev, "Enter ioctl %s\n", #function); \
1949 BUILD_BUG_ON(_IOC_DIR(cmd) != (_IOC_WRITE | _IOC_READ)); \
1950 BUILD_BUG_ON(sizeof(param) != _IOC_SIZE(cmd)); \
1951 err = copy_from_user(&param, uarg, sizeof(param)); \
1952 if (err) \
1953 return -EFAULT; \
1954 ret = function(arg, &param); \
1955 err = copy_to_user(uarg, &param, sizeof(param)); \
1956 if (err) \
1957 return -EFAULT; \
1958 dev_dbg(arg->kbdev->dev, "Return %d from ioctl %s\n", ret, \
1959 #function); \
1960 return ret; \
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001961 } while (0)
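
/*
 * Dispatch sketch (illustrative): a case in the ioctl switch below such as
 *
 *	case KBASE_IOCTL_MEM_QUERY:
 *		KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
 *				kbase_api_mem_query,
 *				union kbase_ioctl_mem_query,
 *				kctx);
 *		break;
 *
 * expands to a copy_from_user() of the union from uarg, a call to
 * kbase_api_mem_query(kctx, &param), and a copy_to_user() of the updated
 * union back to uarg, returning -EFAULT if either copy fails.
 */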
1962
Sidath Senanayakefca86132021-06-15 13:39:30 +01001963static int kbasep_ioctl_set_limited_core_count(struct kbase_context *kctx,
1964 struct kbase_ioctl_set_limited_core_count *set_limited_core_count)
1965{
1966 const u64 shader_core_mask =
1967 kbase_pm_get_present_cores(kctx->kbdev, KBASE_PM_CORE_SHADER);
1968 const u64 limited_core_mask =
1969 ((u64)1 << (set_limited_core_count->max_core_count)) - 1;
1970
1971 if ((shader_core_mask & limited_core_mask) == 0) {
1972 /* At least one shader core must be available after applying the mask */
1973 return -EINVAL;
1974 }
1975
1976 kctx->limited_core_mask = limited_core_mask;
1977 return 0;
1978}
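
/*
 * Worked example (illustrative): with max_core_count == 4 the computed
 * limited_core_mask is 0xF. If kbase_pm_get_present_cores() reports a
 * shader core mask of e.g. 0xFF, the intersection is non-zero, so the mask
 * is accepted and stored in kctx->limited_core_mask. A max_core_count of 0
 * yields a mask of 0 and is rejected with -EINVAL.
 */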
1979
Jörg Wagnerdacf0042023-08-01 13:38:22 +00001980static long kbase_kfile_ioctl(struct kbase_file *kfile, unsigned int cmd, unsigned long arg)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001981{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001982 struct kbase_context *kctx = NULL;
1983 struct kbase_device *kbdev = kfile->kbdev;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001984 void __user *uarg = (void __user *)arg;
1985
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001986 /* Only these ioctls are available until setup is complete */
1987 switch (cmd) {
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01001988 case KBASE_IOCTL_VERSION_CHECK:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02001989 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
1990 kbase_api_handshake,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02001991 struct kbase_ioctl_version_check,
1992 kfile);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01001993 break;
1994
Sidath Senanayake72f24572020-10-27 11:38:49 +00001995 case KBASE_IOCTL_VERSION_CHECK_RESERVED:
1996 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK_RESERVED,
1997 kbase_api_handshake_dummy,
1998 struct kbase_ioctl_version_check,
1999 kfile);
2000 break;
2001
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002002 case KBASE_IOCTL_SET_FLAGS:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002003 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
2004 kbase_api_set_flags,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002005 struct kbase_ioctl_set_flags,
2006 kfile);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002007 break;
Yiwei Zhang63fbdfe2021-02-24 05:18:52 +00002008
Yunju Lee2a499c82022-11-10 21:59:24 +00002009#if !MALI_USE_CSF
Yiwei Zhang63fbdfe2021-02-24 05:18:52 +00002010 case KBASE_IOCTL_APC_REQUEST:
2011 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_APC_REQUEST,
2012 kbase_api_apc_request,
2013 struct kbase_ioctl_apc_request,
2014 kfile);
2015 break;
Yunju Lee2a499c82022-11-10 21:59:24 +00002016#endif
Siddharth Kapoor93dab6a2021-11-26 19:02:16 +08002017
Jesse Hall0c596dc2021-11-23 14:38:46 -08002018 case KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO:
2019 KBASE_HANDLE_IOCTL_INOUT(
2020 KBASE_IOCTL_KINSTR_PRFCNT_ENUM_INFO,
2021 kbase_api_kinstr_prfcnt_enum_info,
2022 struct kbase_ioctl_kinstr_prfcnt_enum_info, kfile);
2023 break;
2024
2025 case KBASE_IOCTL_KINSTR_PRFCNT_SETUP:
2026 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_PRFCNT_SETUP,
2027 kbase_api_kinstr_prfcnt_setup,
2028 union kbase_ioctl_kinstr_prfcnt_setup,
2029 kfile);
2030 break;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002031 case KBASE_IOCTL_GET_GPUPROPS:
2032 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS, kbase_api_get_gpuprops,
2033 struct kbase_ioctl_get_gpuprops, kfile);
2034 break;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002035 }
2036
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002037 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
2038 if (unlikely(!kctx))
2039 return -EPERM;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002040
2041 /* Normal ioctls */
2042 switch (cmd) {
Sidath Senanayake72f24572020-10-27 11:38:49 +00002043#if !MALI_USE_CSF
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002044 case KBASE_IOCTL_JOB_SUBMIT:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002045 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
2046 kbase_api_job_submit,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002047 struct kbase_ioctl_job_submit,
2048 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002049 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002050#endif /* !MALI_USE_CSF */
Sidath Senanayake72f24572020-10-27 11:38:49 +00002051#if !MALI_USE_CSF
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002052 case KBASE_IOCTL_POST_TERM:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002053 KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002054 kbase_api_post_term,
2055 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002056 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002057#endif /* !MALI_USE_CSF */
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002058 case KBASE_IOCTL_MEM_ALLOC:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002059 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
2060 kbase_api_mem_alloc,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002061 union kbase_ioctl_mem_alloc,
2062 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002063 break;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002064#if MALI_USE_CSF
2065 case KBASE_IOCTL_MEM_ALLOC_EX:
2066 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC_EX, kbase_api_mem_alloc_ex,
2067 union kbase_ioctl_mem_alloc_ex, kctx);
2068 break;
2069#endif
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002070 case KBASE_IOCTL_MEM_QUERY:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002071 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
2072 kbase_api_mem_query,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002073 union kbase_ioctl_mem_query,
2074 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002075 break;
2076 case KBASE_IOCTL_MEM_FREE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002077 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
2078 kbase_api_mem_free,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002079 struct kbase_ioctl_mem_free,
2080 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002081 break;
2082 case KBASE_IOCTL_DISJOINT_QUERY:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002083 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
2084 kbase_api_disjoint_query,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002085 struct kbase_ioctl_disjoint_query,
2086 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002087 break;
2088 case KBASE_IOCTL_GET_DDK_VERSION:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002089 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
2090 kbase_api_get_ddk_version,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002091 struct kbase_ioctl_get_ddk_version,
2092 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002093 break;
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002094 case KBASE_IOCTL_MEM_JIT_INIT:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002095 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
2096 kbase_api_mem_jit_init,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002097 struct kbase_ioctl_mem_jit_init,
2098 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002099 break;
Sidath Senanayakea9704312018-12-06 09:09:59 +01002100 case KBASE_IOCTL_MEM_EXEC_INIT:
2101 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_EXEC_INIT,
2102 kbase_api_mem_exec_init,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002103 struct kbase_ioctl_mem_exec_init,
2104 kctx);
Sidath Senanayakea9704312018-12-06 09:09:59 +01002105 break;
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002106 case KBASE_IOCTL_MEM_SYNC:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002107 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
2108 kbase_api_mem_sync,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002109 struct kbase_ioctl_mem_sync,
2110 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002111 break;
2112 case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002113 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
2114 kbase_api_mem_find_cpu_offset,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002115 union kbase_ioctl_mem_find_cpu_offset,
2116 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002117 break;
2118 case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
Sidath Senanayakee42736e2018-01-22 13:55:38 +01002119 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
2120 kbase_api_mem_find_gpu_start_and_offset,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002121 union kbase_ioctl_mem_find_gpu_start_and_offset,
2122 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002123 break;
2124 case KBASE_IOCTL_GET_CONTEXT_ID:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002125 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
2126 kbase_api_get_context_id,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002127 struct kbase_ioctl_get_context_id,
2128 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002129 break;
2130 case KBASE_IOCTL_TLSTREAM_ACQUIRE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002131 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
2132 kbase_api_tlstream_acquire,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002133 struct kbase_ioctl_tlstream_acquire,
2134 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002135 break;
2136 case KBASE_IOCTL_TLSTREAM_FLUSH:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002137 KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002138 kbase_api_tlstream_flush,
2139 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002140 break;
2141 case KBASE_IOCTL_MEM_COMMIT:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002142 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
2143 kbase_api_mem_commit,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002144 struct kbase_ioctl_mem_commit,
2145 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002146 break;
2147 case KBASE_IOCTL_MEM_ALIAS:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002148 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
2149 kbase_api_mem_alias,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002150 union kbase_ioctl_mem_alias,
2151 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002152 break;
2153 case KBASE_IOCTL_MEM_IMPORT:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002154 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
2155 kbase_api_mem_import,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002156 union kbase_ioctl_mem_import,
2157 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002158 break;
2159 case KBASE_IOCTL_MEM_FLAGS_CHANGE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002160 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
2161 kbase_api_mem_flags_change,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002162 struct kbase_ioctl_mem_flags_change,
2163 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002164 break;
2165 case KBASE_IOCTL_STREAM_CREATE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002166 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
2167 kbase_api_stream_create,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002168 struct kbase_ioctl_stream_create,
2169 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002170 break;
2171 case KBASE_IOCTL_FENCE_VALIDATE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002172 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
2173 kbase_api_fence_validate,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002174 struct kbase_ioctl_fence_validate,
2175 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002176 break;
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002177 case KBASE_IOCTL_MEM_PROFILE_ADD:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002178 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
2179 kbase_api_mem_profile_add,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002180 struct kbase_ioctl_mem_profile_add,
2181 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002182 break;
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02002183
Sidath Senanayake72f24572020-10-27 11:38:49 +00002184#if !MALI_USE_CSF
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002185 case KBASE_IOCTL_SOFT_EVENT_UPDATE:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002186 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
2187 kbase_api_soft_event_update,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002188 struct kbase_ioctl_soft_event_update,
2189 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002190 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002191#endif /* !MALI_USE_CSF */
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02002192
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002193 case KBASE_IOCTL_STICKY_RESOURCE_MAP:
Sidath Senanayakee42736e2018-01-22 13:55:38 +01002194 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
2195 kbase_api_sticky_resource_map,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002196 struct kbase_ioctl_sticky_resource_map,
2197 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002198 break;
2199 case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
Sidath Senanayakee42736e2018-01-22 13:55:38 +01002200 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
2201 kbase_api_sticky_resource_unmap,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002202 struct kbase_ioctl_sticky_resource_unmap,
2203 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002204 break;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002205
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002206 /* Instrumentation. */
Sidath Senanayake72f24572020-10-27 11:38:49 +00002207#if !MALI_USE_CSF
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01002208 case KBASE_IOCTL_KINSTR_JM_FD:
2209 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_KINSTR_JM_FD,
2210 kbase_api_kinstr_jm_fd,
2211 union kbase_kinstr_jm_fd,
2212 kctx);
2213 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002214#endif
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002215 case KBASE_IOCTL_HWCNT_READER_SETUP:
2216 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
2217 kbase_api_hwcnt_reader_setup,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002218 struct kbase_ioctl_hwcnt_reader_setup,
2219 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002220 break;
Sidath Senanayake86966062019-08-23 15:40:27 +02002221 case KBASE_IOCTL_GET_CPU_GPU_TIMEINFO:
2222 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_GET_CPU_GPU_TIMEINFO,
2223 kbase_api_get_cpu_gpu_timeinfo,
2224 union kbase_ioctl_get_cpu_gpu_timeinfo,
2225 kctx);
2226 break;
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08002227#if IS_ENABLED(CONFIG_MALI_NO_MALI)
2228 case KBASE_IOCTL_HWCNT_SET:
2229 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
2230 kbase_api_hwcnt_set,
2231 struct kbase_ioctl_hwcnt_values,
2232 kctx);
2233 break;
2234#endif /* CONFIG_MALI_NO_MALI */
Sidath Senanayakef10b3de2018-09-27 14:34:14 +02002235#ifdef CONFIG_MALI_CINSTR_GWT
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002236 case KBASE_IOCTL_CINSTR_GWT_START:
2237 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002238 kbase_gpu_gwt_start,
2239 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002240 break;
2241 case KBASE_IOCTL_CINSTR_GWT_STOP:
2242 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002243 kbase_gpu_gwt_stop,
2244 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002245 break;
2246 case KBASE_IOCTL_CINSTR_GWT_DUMP:
2247 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
2248 kbase_gpu_gwt_dump,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002249 union kbase_ioctl_cinstr_gwt_dump,
2250 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002251 break;
2252#endif
Sidath Senanayake72f24572020-10-27 11:38:49 +00002253#if MALI_USE_CSF
2254 case KBASE_IOCTL_CS_EVENT_SIGNAL:
2255 KBASE_HANDLE_IOCTL(KBASE_IOCTL_CS_EVENT_SIGNAL,
2256 kbasep_cs_event_signal,
2257 kctx);
2258 break;
2259 case KBASE_IOCTL_CS_QUEUE_REGISTER:
2260 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER,
2261 kbasep_cs_queue_register,
2262 struct kbase_ioctl_cs_queue_register,
2263 kctx);
2264 break;
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01002265 case KBASE_IOCTL_CS_QUEUE_REGISTER_EX:
2266 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_REGISTER_EX,
2267 kbasep_cs_queue_register_ex,
2268 struct kbase_ioctl_cs_queue_register_ex,
2269 kctx);
2270 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002271 case KBASE_IOCTL_CS_QUEUE_TERMINATE:
2272 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_TERMINATE,
2273 kbasep_cs_queue_terminate,
2274 struct kbase_ioctl_cs_queue_terminate,
2275 kctx);
2276 break;
2277 case KBASE_IOCTL_CS_QUEUE_BIND:
2278 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_BIND,
2279 kbasep_cs_queue_bind,
2280 union kbase_ioctl_cs_queue_bind,
2281 kctx);
2282 break;
2283 case KBASE_IOCTL_CS_QUEUE_KICK:
2284 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_KICK,
2285 kbasep_cs_queue_kick,
2286 struct kbase_ioctl_cs_queue_kick,
2287 kctx);
2288 break;
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08002289 case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6:
2290 KBASE_HANDLE_IOCTL_INOUT(
2291 KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_6,
2292 kbasep_cs_queue_group_create_1_6,
2293 union kbase_ioctl_cs_queue_group_create_1_6, kctx);
2294 break;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002295 case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18:
2296 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE_1_18,
2297 kbasep_cs_queue_group_create_1_18,
2298 union kbase_ioctl_cs_queue_group_create_1_18, kctx);
2299 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002300 case KBASE_IOCTL_CS_QUEUE_GROUP_CREATE:
2301 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_QUEUE_GROUP_CREATE,
2302 kbasep_cs_queue_group_create,
2303 union kbase_ioctl_cs_queue_group_create,
2304 kctx);
2305 break;
2306 case KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE:
2307 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_QUEUE_GROUP_TERMINATE,
2308 kbasep_cs_queue_group_terminate,
2309 struct kbase_ioctl_cs_queue_group_term,
2310 kctx);
2311 break;
2312 case KBASE_IOCTL_KCPU_QUEUE_CREATE:
2313 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_KCPU_QUEUE_CREATE,
2314 kbasep_kcpu_queue_new,
2315 struct kbase_ioctl_kcpu_queue_new,
2316 kctx);
2317 break;
2318 case KBASE_IOCTL_KCPU_QUEUE_DELETE:
2319 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_DELETE,
2320 kbasep_kcpu_queue_delete,
2321 struct kbase_ioctl_kcpu_queue_delete,
2322 kctx);
2323 break;
2324 case KBASE_IOCTL_KCPU_QUEUE_ENQUEUE:
2325 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_KCPU_QUEUE_ENQUEUE,
2326 kbasep_kcpu_queue_enqueue,
2327 struct kbase_ioctl_kcpu_queue_enqueue,
2328 kctx);
2329 break;
2330 case KBASE_IOCTL_CS_TILER_HEAP_INIT:
2331 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT,
2332 kbasep_cs_tiler_heap_init,
2333 union kbase_ioctl_cs_tiler_heap_init,
2334 kctx);
2335 break;
Jack Divere19249e2022-11-07 12:13:47 +00002336 case KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13:
2337 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_TILER_HEAP_INIT_1_13,
2338 kbasep_cs_tiler_heap_init_1_13,
2339 union kbase_ioctl_cs_tiler_heap_init_1_13, kctx);
2340 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002341 case KBASE_IOCTL_CS_TILER_HEAP_TERM:
2342 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_TILER_HEAP_TERM,
2343 kbasep_cs_tiler_heap_term,
2344 struct kbase_ioctl_cs_tiler_heap_term,
2345 kctx);
2346 break;
2347 case KBASE_IOCTL_CS_GET_GLB_IFACE:
2348 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CS_GET_GLB_IFACE,
2349 kbase_ioctl_cs_get_glb_iface,
2350 union kbase_ioctl_cs_get_glb_iface,
2351 kctx);
2352 break;
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002353 case KBASE_IOCTL_CS_CPU_QUEUE_DUMP:
2354 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_CS_CPU_QUEUE_DUMP,
2355 kbasep_ioctl_cs_cpu_queue_dump,
2356 struct kbase_ioctl_cs_cpu_queue_info,
2357 kctx);
2358 break;
Debarshi Dutta20fff722023-06-02 13:36:22 +00002359 /* This IOCTL will be kept for backward compatibility */
Jack Divere19249e2022-11-07 12:13:47 +00002360 case KBASE_IOCTL_READ_USER_PAGE:
2361 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_READ_USER_PAGE, kbase_ioctl_read_user_page,
2362 union kbase_ioctl_read_user_page, kctx);
2363 break;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002364#endif /* MALI_USE_CSF */
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002365#if MALI_UNIT_TEST
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002366 case KBASE_IOCTL_TLSTREAM_STATS:
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002367 KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
2368 kbase_api_tlstream_stats,
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002369 struct kbase_ioctl_tlstream_stats,
2370 kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01002371 break;
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02002372#endif /* MALI_UNIT_TEST */
Sidath Senanayake97483052021-01-29 15:03:53 +00002373 case KBASE_IOCTL_CONTEXT_PRIORITY_CHECK:
2374 KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CONTEXT_PRIORITY_CHECK,
2375 kbasep_ioctl_context_priority_check,
2376 struct kbase_ioctl_context_priority_check,
2377 kctx);
2378 break;
Sidath Senanayakefca86132021-06-15 13:39:30 +01002379 case KBASE_IOCTL_SET_LIMITED_CORE_COUNT:
2380 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_LIMITED_CORE_COUNT,
2381 kbasep_ioctl_set_limited_core_count,
2382 struct kbase_ioctl_set_limited_core_count,
2383 kctx);
2384 break;
Jack Diverbbff2062023-01-06 15:25:09 +00002385 case KBASE_IOCTL_BUFFER_LIVENESS_UPDATE:
2386 KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_BUFFER_LIVENESS_UPDATE,
2387 kbase_api_buffer_liveness_update,
2388 struct kbase_ioctl_buffer_liveness_update,
2389 kctx);
2390 break;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02002391 }
2392
2393 dev_warn(kbdev->dev, "Unknown ioctl 0x%x nr:%d", cmd, _IOC_NR(cmd));
2394
2395 return -ENOIOCTLCMD;
2396}
2397
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002398static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2399{
2400 struct kbase_file *const kfile = filp->private_data;
2401 long ioctl_ret;
2402
2403 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
2404 return -EPERM;
2405
2406 ioctl_ret = kbase_kfile_ioctl(kfile, cmd, arg);
2407 kbase_file_dec_fops_count(kfile);
2408
2409 return ioctl_ret;
2410}
2411
Sidath Senanayake72f24572020-10-27 11:38:49 +00002412#if MALI_USE_CSF
2413static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
2414{
2415 struct kbase_file *const kfile = filp->private_data;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002416 struct kbase_context *kctx;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002417 struct base_csf_notification event_data = {
2418 .type = BASE_CSF_NOTIFICATION_EVENT };
2419 const size_t data_size = sizeof(event_data);
2420 bool read_event = false, read_error = false;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002421 ssize_t err = 0;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002422
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002423 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
Sidath Senanayake72f24572020-10-27 11:38:49 +00002424 return -EPERM;
2425
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002426 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
2427 if (unlikely(!kctx)) {
2428 err = -EPERM;
2429 goto out;
2430 }
2431
2432 if (count < data_size) {
2433 err = -ENOBUFS;
2434 goto out;
2435 }
Suzanne Candanedoc7b5fc82022-10-06 14:32:55 +01002436
Sidath Senanayake72f24572020-10-27 11:38:49 +00002437 if (atomic_read(&kctx->event_count))
2438 read_event = true;
2439 else
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08002440 read_error = kbase_csf_event_read_error(kctx, &event_data);
Sidath Senanayake72f24572020-10-27 11:38:49 +00002441
2442 if (!read_event && !read_error) {
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002443 bool dump = kbase_csf_cpu_queue_read_dump_req(kctx,
2444 &event_data);
Sidath Senanayake72f24572020-10-27 11:38:49 +00002445 /* This condition is not treated as an error.
2446 * It is possible that the event handling thread was woken up due
2447 * to a fault/error that occurred for a queue group, but the queue
2448 * group had already been terminated by userspace before the thread
2449 * could read the corresponding fault data.
2450 */
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002451 if (!dump)
2452 dev_dbg(kctx->kbdev->dev,
2453 "Neither event nor error signaled");
Sidath Senanayake72f24572020-10-27 11:38:49 +00002454 }
2455
2456 if (copy_to_user(buf, &event_data, data_size) != 0) {
2457 dev_warn(kctx->kbdev->dev,
2458 "Failed to copy data\n");
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002459 err = -EFAULT;
2460 goto out;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002461 }
2462
2463 if (read_event)
2464 atomic_set(&kctx->event_count, 0);
2465
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002466out:
2467 kbase_file_dec_fops_count(kfile);
2468 return err ? err : data_size;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002469}
2470#else /* MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02002471static ssize_t kbase_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
2472{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002473 struct kbase_file *const kfile = filp->private_data;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002474 struct kbase_context *kctx;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002475 struct base_jd_event_v2 uevent;
2476 int out_count = 0;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002477 ssize_t err = 0;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002478
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002479 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002480 return -EPERM;
2481
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002482 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
2483 if (unlikely(!kctx)) {
2484 err = -EPERM;
2485 goto out;
2486 }
2487
2488 if (count < sizeof(uevent)) {
2489 err = -ENOBUFS;
2490 goto out;
2491 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002492
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002493 memset(&uevent, 0, sizeof(uevent));
2494
Sidath Senanayake823a7602016-06-29 16:03:55 +02002495 do {
2496 while (kbase_event_dequeue(kctx, &uevent)) {
2497 if (out_count > 0)
2498 goto out;
2499
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002500 if (filp->f_flags & O_NONBLOCK) {
2501 err = -EAGAIN;
2502 goto out;
2503 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002504
Varad Gautam66e3cfc2023-09-27 13:34:16 +00002505 if (wait_event_interruptible(kctx->kfile->event_queue,
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002506 kbase_event_pending(kctx)) != 0) {
2507 err = -ERESTARTSYS;
2508 goto out;
2509 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002510 }
2511 if (uevent.event_code == BASE_JD_EVENT_DRV_TERMINATED) {
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002512 if (out_count == 0) {
2513 err = -EPIPE;
2514 goto out;
2515 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002516 goto out;
2517 }
2518
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002519 if (copy_to_user(buf, &uevent, sizeof(uevent)) != 0) {
2520 err = -EFAULT;
2521 goto out;
2522 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002523
2524 buf += sizeof(uevent);
2525 out_count++;
2526 count -= sizeof(uevent);
2527 } while (count >= sizeof(uevent));
2528
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002529out:
2530 kbase_file_dec_fops_count(kfile);
2531 return err ? err : (out_count * sizeof(uevent));
Sidath Senanayake823a7602016-06-29 16:03:55 +02002532}
Sidath Senanayake72f24572020-10-27 11:38:49 +00002533#endif /* MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02002534
Jack Divere19249e2022-11-07 12:13:47 +00002535static __poll_t kbase_poll(struct file *filp, poll_table *wait)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002536{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002537 struct kbase_file *const kfile = filp->private_data;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002538 struct kbase_context *kctx;
2539 __poll_t ret = 0;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002540
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002541 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile))) {
2542#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
2543 ret = POLLNVAL;
2544#else
2545 ret = EPOLLNVAL;
2546#endif
2547 return ret;
2548 }
2549
2550 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
Jack Divere19249e2022-11-07 12:13:47 +00002551 if (unlikely(!kctx)) {
2552#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002553 ret = POLLERR;
Jack Divere19249e2022-11-07 12:13:47 +00002554#else
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002555 ret = EPOLLERR;
Jack Divere19249e2022-11-07 12:13:47 +00002556#endif
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002557 goto out;
Jack Divere19249e2022-11-07 12:13:47 +00002558 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002559
Varad Gautam66e3cfc2023-09-27 13:34:16 +00002560 poll_wait(filp, &kfile->event_queue, wait);
Jack Divere19249e2022-11-07 12:13:47 +00002561 if (kbase_event_pending(kctx)) {
2562#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002563 ret = POLLIN | POLLRDNORM;
Jack Divere19249e2022-11-07 12:13:47 +00002564#else
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002565 ret = EPOLLIN | EPOLLRDNORM;
Jack Divere19249e2022-11-07 12:13:47 +00002566#endif
2567 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002568
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002569out:
2570 kbase_file_dec_fops_count(kfile);
2571 return ret;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002572}
2573
Devika Krishnadas6a9213b2021-07-29 11:52:22 +08002574void _kbase_event_wakeup(struct kbase_context *kctx, bool sync)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002575{
2576 KBASE_DEBUG_ASSERT(kctx);
Devika Krishnadas6a9213b2021-07-29 11:52:22 +08002577 if (sync) {
2578 dev_dbg(kctx->kbdev->dev,
2579 "Waking event queue for context %pK (sync)\n", (void *)kctx);
Varad Gautam66e3cfc2023-09-27 13:34:16 +00002580 wake_up_interruptible_sync(&kctx->kfile->event_queue);
Devika Krishnadas6a9213b2021-07-29 11:52:22 +08002581 } else {
2583 dev_dbg(kctx->kbdev->dev,
2584 "Waking event queue for context %pK (nosync)\n", (void *)kctx);
Varad Gautam66e3cfc2023-09-27 13:34:16 +00002585 wake_up_interruptible(&kctx->kfile->event_queue);
Devika Krishnadas6a9213b2021-07-29 11:52:22 +08002586 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002587}
2588
Devika Krishnadas6a9213b2021-07-29 11:52:22 +08002589KBASE_EXPORT_TEST_API(_kbase_event_wakeup);
Sidath Senanayake823a7602016-06-29 16:03:55 +02002590
Sidath Senanayake72f24572020-10-27 11:38:49 +00002591#if MALI_USE_CSF
2592int kbase_event_pending(struct kbase_context *ctx)
2593{
Debarshi Dutta20fff722023-06-02 13:36:22 +00002594 KBASE_DEBUG_ASSERT(ctx);
2595
2596 if (unlikely(!ctx))
2597 return -EPERM;
Sidath Senanayake72f24572020-10-27 11:38:49 +00002598
2599 return (atomic_read(&ctx->event_count) != 0) ||
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08002600 kbase_csf_event_error_pending(ctx) ||
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002601 kbase_csf_cpu_queue_dump_needed(ctx);
Sidath Senanayake72f24572020-10-27 11:38:49 +00002602}
2603#else
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01002604int kbase_event_pending(struct kbase_context *ctx)
2605{
2606 KBASE_DEBUG_ASSERT(ctx);
2607
Debarshi Dutta20fff722023-06-02 13:36:22 +00002608 if (unlikely(!ctx))
2609 return -EPERM;
2610
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01002611 return (atomic_read(&ctx->event_count) != 0) ||
Sidath Senanayake72f24572020-10-27 11:38:49 +00002612 (atomic_read(&ctx->event_closed) != 0);
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01002613}
Sidath Senanayake72f24572020-10-27 11:38:49 +00002614#endif
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01002615
2616KBASE_EXPORT_TEST_API(kbase_event_pending);
2617
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002618static int kbase_mmap(struct file *const filp, struct vm_area_struct *const vma)
2619{
2620 struct kbase_file *const kfile = filp->private_data;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002621 struct kbase_context *kctx;
2622 int ret;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002623
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002624 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002625 return -EPERM;
2626
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002627 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
2628 if (likely(kctx))
2629 ret = kbase_context_mmap(kctx, vma);
2630 else
2631 ret = -EPERM;
2632
2633 kbase_file_dec_fops_count(kfile);
2634 return ret;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002635}
2636
Sidath Senanayake823a7602016-06-29 16:03:55 +02002637static int kbase_check_flags(int flags)
2638{
2639 /* Enforce that the file is opened with O_CLOEXEC so that the descriptor is
2640 * automatically closed on execve() and does not leak into child processes.
2641 */
2642 if ((flags & O_CLOEXEC) == 0)
2643 return -EINVAL;
2644
2645 return 0;
2646}
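
/*
 * Illustrative only: userspace is expected to open the device node with
 * O_CLOEXEC, e.g.
 *
 *	int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
 *
 * (the node name may differ by platform); opens without O_CLOEXEC are
 * rejected by the check above.
 */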
2647
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002648static unsigned long kbase_get_unmapped_area(struct file *const filp,
2649 const unsigned long addr, const unsigned long len,
2650 const unsigned long pgoff, const unsigned long flags)
2651{
2652 struct kbase_file *const kfile = filp->private_data;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002653 struct kbase_context *kctx;
2654 unsigned long address;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002655
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002656 if (unlikely(!kbase_file_inc_fops_count_if_allowed(kfile)))
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002657 return -EPERM;
2658
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002659 kctx = kbase_file_get_kctx_if_setup_complete(kfile);
2660 if (likely(kctx))
2661 address = kbase_context_get_unmapped_area(kctx, addr, len, pgoff, flags);
2662 else
2663 address = -EPERM;
2664
2665 kbase_file_dec_fops_count(kfile);
2666 return address;
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002667}
2668
Sidath Senanayake823a7602016-06-29 16:03:55 +02002669static const struct file_operations kbase_fops = {
2670 .owner = THIS_MODULE,
2671 .open = kbase_open,
Jörg Wagnerdacf0042023-08-01 13:38:22 +00002672 .flush = kbase_flush,
Sidath Senanayake823a7602016-06-29 16:03:55 +02002673 .release = kbase_release,
2674 .read = kbase_read,
2675 .poll = kbase_poll,
2676 .unlocked_ioctl = kbase_ioctl,
2677 .compat_ioctl = kbase_ioctl,
2678 .mmap = kbase_mmap,
2679 .check_flags = kbase_check_flags,
Sidath Senanayake823a7602016-06-29 16:03:55 +02002680 .get_unmapped_area = kbase_get_unmapped_area,
Sidath Senanayake823a7602016-06-29 16:03:55 +02002681};
2682
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002683/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002684 * power_policy_show - Show callback for the power_policy sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002685 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002686 * @dev: The device this sysfs file is for
2687 * @attr: The attributes of the sysfs file
2688 * @buf: The output buffer for the sysfs file contents
Sidath Senanayake823a7602016-06-29 16:03:55 +02002689 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002690 * This function is called to get the contents of the power_policy sysfs
2691 * file. This is a list of the available policies with the currently active one
2692 * surrounded by square brackets.
2693 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002694 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002695 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002696static ssize_t power_policy_show(struct device *dev, struct device_attribute *attr, char *const buf)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002697{
2698 struct kbase_device *kbdev;
2699 const struct kbase_pm_policy *current_policy;
2700 const struct kbase_pm_policy *const *policy_list;
2701 int policy_count;
2702 int i;
2703 ssize_t ret = 0;
2704
2705 kbdev = to_kbase_device(dev);
2706
2707 if (!kbdev)
2708 return -ENODEV;
2709
2710 current_policy = kbase_pm_get_policy(kbdev);
2711
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002712 policy_count = kbase_pm_list_policies(kbdev, &policy_list);
Sidath Senanayake823a7602016-06-29 16:03:55 +02002713
2714 for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
2715 if (policy_list[i] == current_policy)
2716 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
2717 else
2718 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
2719 }
2720
2721 if (ret < PAGE_SIZE - 1) {
2722 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
2723 } else {
2724 buf[PAGE_SIZE - 2] = '\n';
2725 buf[PAGE_SIZE - 1] = '\0';
2726 ret = PAGE_SIZE - 1;
2727 }
2728
2729 return ret;
2730}
2731
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002732/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002733 * power_policy_store - Store callback for the power_policy sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002734 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002735 * @dev: The device this sysfs file is for
2736 * @attr: The attributes of the sysfs file
2737 * @buf: The value written to the sysfs file
Sidath Senanayake72f24572020-10-27 11:38:49 +00002738 * @count: The number of bytes to write to the sysfs file
Sidath Senanayake823a7602016-06-29 16:03:55 +02002739 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002740 * This function is called when the power_policy sysfs file is written to.
2741 * It matches the requested policy against the available policies and if a
2742 * matching policy is found calls kbase_pm_set_policy() to change the
2743 * policy.
2744 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002745 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002746 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002747static ssize_t power_policy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002748{
2749 struct kbase_device *kbdev;
2750 const struct kbase_pm_policy *new_policy = NULL;
2751 const struct kbase_pm_policy *const *policy_list;
2752 int policy_count;
2753 int i;
2754
2755 kbdev = to_kbase_device(dev);
2756
2757 if (!kbdev)
2758 return -ENODEV;
2759
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02002760 policy_count = kbase_pm_list_policies(kbdev, &policy_list);
Sidath Senanayake823a7602016-06-29 16:03:55 +02002761
2762 for (i = 0; i < policy_count; i++) {
2763 if (sysfs_streq(policy_list[i]->name, buf)) {
2764 new_policy = policy_list[i];
2765 break;
2766 }
2767 }
2768
2769 if (!new_policy) {
2770 dev_err(dev, "power_policy: policy not found\n");
2771 return -EINVAL;
2772 }
2773
2774 kbase_pm_set_policy(kbdev, new_policy);
2775
2776 return count;
2777}
2778
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002779/*
2780 * The sysfs file power_policy.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002781 *
2782 * This is used for obtaining information about the available policies,
2783 * determining which policy is currently active, and changing the active
2784 * policy.
2785 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002786static DEVICE_ATTR_RW(power_policy);
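/*
 * Example usage, as a sketch only: the sysfs path and the policy names shown
 * depend on the platform and kernel configuration (/sys/class/misc/mali0/device
 * is a typical location for these attributes, but is not guaranteed).
 *
 *   $ cat /sys/class/misc/mali0/device/power_policy
 *   [coarse_demand] always_on
 *   $ echo always_on > /sys/class/misc/mali0/device/power_policy
 */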
Sidath Senanayake823a7602016-06-29 16:03:55 +02002787
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002788/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002789 * core_mask_show - Show callback for the core_mask sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002790 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002791 * @dev: The device this sysfs file is for
2792 * @attr: The attributes of the sysfs file
2793 * @buf: The output buffer for the sysfs file contents
Sidath Senanayake823a7602016-06-29 16:03:55 +02002794 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002795 * This function is called to get the contents of the core_mask sysfs file.
2796 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002797 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002798 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002799static ssize_t core_mask_show(struct device *dev, struct device_attribute *attr, char * const buf)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002800{
2801 struct kbase_device *kbdev;
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002802 unsigned long flags;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002803 ssize_t ret = 0;
2804
2805 kbdev = to_kbase_device(dev);
2806
2807 if (!kbdev)
2808 return -ENODEV;
2809
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002810 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
2811
2812#if MALI_USE_CSF
2813 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2814 "Current debug core mask : 0x%llX\n",
2815 kbdev->pm.debug_core_mask);
2816 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2817 "Current desired core mask : 0x%llX\n",
2818 kbase_pm_ca_get_core_mask(kbdev));
2819 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2820 "Current in use core mask : 0x%llX\n",
2821 kbdev->pm.backend.shaders_avail);
2822#else
Sidath Senanayake823a7602016-06-29 16:03:55 +02002823 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2824 "Current core mask (JS0) : 0x%llX\n",
2825 kbdev->pm.debug_core_mask[0]);
2826 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2827 "Current core mask (JS1) : 0x%llX\n",
2828 kbdev->pm.debug_core_mask[1]);
2829 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2830 "Current core mask (JS2) : 0x%llX\n",
2831 kbdev->pm.debug_core_mask[2]);
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002832#endif /* MALI_USE_CSF */
2833
Sidath Senanayake823a7602016-06-29 16:03:55 +02002834 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2835 "Available core mask : 0x%llX\n",
2836 kbdev->gpu_props.props.raw_props.shader_present);
2837
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002838 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
2839
Sidath Senanayake823a7602016-06-29 16:03:55 +02002840 return ret;
2841}
2842
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002843/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002844 * core_mask_store - Store callback for the core_mask sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002845 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002846 * @dev: The device this sysfs file is for
2847 * @attr: The attributes of the sysfs file
2848 * @buf: The value written to the sysfs file
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01002849 * @count: The number of bytes to write to the sysfs file
Sidath Senanayake823a7602016-06-29 16:03:55 +02002850 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002851 * This function is called when the core_mask sysfs file is written to.
2852 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002853 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002854 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002855static ssize_t core_mask_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
Sidath Senanayake823a7602016-06-29 16:03:55 +02002856{
2857 struct kbase_device *kbdev;
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002858#if MALI_USE_CSF
2859 u64 new_core_mask;
2860#else
Sidath Senanayake823a7602016-06-29 16:03:55 +02002861 u64 new_core_mask[3];
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002862 u64 group0_core_mask;
2863 int i;
2864#endif /* MALI_USE_CSF */
2865
2866 int items;
Sidath Senanayakea9704312018-12-06 09:09:59 +01002867 ssize_t err = count;
2868 unsigned long flags;
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002869 u64 shader_present;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002870
2871 kbdev = to_kbase_device(dev);
2872
2873 if (!kbdev)
2874 return -ENODEV;
2875
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002876#if MALI_USE_CSF
2877 items = sscanf(buf, "%llx", &new_core_mask);
2878
2879 if (items != 1) {
2880 dev_err(kbdev->dev,
2881 "Couldn't process core mask write operation.\n"
2882 "Use format <core_mask>\n");
2883 err = -EINVAL;
2884 goto end;
2885 }
2886#else
Sidath Senanayake823a7602016-06-29 16:03:55 +02002887 items = sscanf(buf, "%llx %llx %llx",
2888 &new_core_mask[0], &new_core_mask[1],
2889 &new_core_mask[2]);
2890
Sidath Senanayakea9704312018-12-06 09:09:59 +01002891 if (items != 1 && items != 3) {
2892 dev_err(kbdev->dev, "Couldn't process core mask write operation.\n"
2893 "Use format <core_mask>\n"
2894 "or <core_mask_js0> <core_mask_js1> <core_mask_js2>\n");
2895 err = -EINVAL;
2896 goto end;
2897 }
2898
Sidath Senanayake823a7602016-06-29 16:03:55 +02002899 if (items == 1)
2900 new_core_mask[1] = new_core_mask[2] = new_core_mask[0];
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002901#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02002902
Kevin DuBoisb98ccad2022-10-03 20:21:17 +00002903 rt_mutex_lock(&kbdev->pm.lock);
Sidath Senanayakea9704312018-12-06 09:09:59 +01002904 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
Sidath Senanayake823a7602016-06-29 16:03:55 +02002905
Sidath Senanayakea9704312018-12-06 09:09:59 +01002906 shader_present = kbdev->gpu_props.props.raw_props.shader_present;
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002907
2908#if MALI_USE_CSF
2909 if ((new_core_mask & shader_present) != new_core_mask) {
2910 dev_err(dev,
2911 "Invalid core mask 0x%llX: Includes non-existent cores (present = 0x%llX)",
2912 new_core_mask, shader_present);
2913 err = -EINVAL;
2914 goto unlock;
2915
2916 } else if (!(new_core_mask & shader_present &
2917 kbdev->pm.backend.ca_cores_enabled)) {
2918 dev_err(dev,
2919			"Invalid core mask 0x%llX: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX)\n",
2920 new_core_mask,
2921 kbdev->gpu_props.props.raw_props.shader_present,
2922 kbdev->pm.backend.ca_cores_enabled);
2923 err = -EINVAL;
2924 goto unlock;
2925 }
2926
2927 if (kbdev->pm.debug_core_mask != new_core_mask)
2928 kbase_pm_set_debug_core_mask(kbdev, new_core_mask);
2929#else
Sidath Senanayakea9704312018-12-06 09:09:59 +01002930 group0_core_mask = kbdev->gpu_props.props.coherency_info.group[0].core_mask;
2931
2932 for (i = 0; i < 3; ++i) {
2933 if ((new_core_mask[i] & shader_present) != new_core_mask[i]) {
2934 dev_err(dev, "Invalid core mask 0x%llX for JS %d: Includes non-existent cores (present = 0x%llX)",
2935 new_core_mask[i], i, shader_present);
2936 err = -EINVAL;
2937 goto unlock;
2938
2939 } else if (!(new_core_mask[i] & shader_present & kbdev->pm.backend.ca_cores_enabled)) {
2940			dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with currently available cores (present = 0x%llX, CA enabled = 0x%llX)\n",
2941 new_core_mask[i], i,
2942 kbdev->gpu_props.props.raw_props.shader_present,
2943 kbdev->pm.backend.ca_cores_enabled);
2944 err = -EINVAL;
2945 goto unlock;
2946
2947 } else if (!(new_core_mask[i] & group0_core_mask)) {
2948 dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with group 0 core mask 0x%llX\n",
2949 new_core_mask[i], i, group0_core_mask);
2950 err = -EINVAL;
2951 goto unlock;
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01002952 } else if (!(new_core_mask[i] & kbdev->gpu_props.curr_config.shader_present)) {
2953 dev_err(dev, "Invalid core mask 0x%llX for JS %d: No intersection with current core mask 0x%llX\n",
2954 new_core_mask[i], i, kbdev->gpu_props.curr_config.shader_present);
2955 err = -EINVAL;
2956 goto unlock;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002957 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02002958 }
2959
Sidath Senanayakea9704312018-12-06 09:09:59 +01002960	if (kbdev->pm.debug_core_mask[0] != new_core_mask[0] ||
2961	    kbdev->pm.debug_core_mask[1] != new_core_mask[1] ||
2962	    kbdev->pm.debug_core_mask[2] != new_core_mask[2]) {
2966 kbase_pm_set_debug_core_mask(kbdev, new_core_mask[0],
2967 new_core_mask[1], new_core_mask[2]);
2968 }
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00002969#endif /* MALI_USE_CSF */
Sidath Senanayakea9704312018-12-06 09:09:59 +01002970
2971unlock:
2972 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
Kevin DuBoisb98ccad2022-10-03 20:21:17 +00002973 rt_mutex_unlock(&kbdev->pm.lock);
Sidath Senanayakea9704312018-12-06 09:09:59 +01002974end:
2975 return err;
Sidath Senanayake823a7602016-06-29 16:03:55 +02002976}
2977
Sidath Senanayake44e8be92017-01-24 10:48:35 +01002978/*
2979 * The sysfs file core_mask.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002980 *
2981 * This is used to restrict shader core availability for debugging purposes.
2982 * Reading it will show the current core mask and the mask of cores available.
2983 * Writing to it will set the current core mask.
2984 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002985static DEVICE_ATTR_RW(core_mask);
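/*
 * Example usage (illustrative values only; the masks accepted depend on the
 * cores actually present. CSF GPUs take a single mask, while job-manager GPUs
 * accept either one mask or three per-slot masks):
 *
 *   $ cat core_mask
 *   $ echo 0xF > core_mask
 *   $ echo 0xF 0xF 0x3 > core_mask          (job-manager GPUs only)
 */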
Sidath Senanayake823a7602016-06-29 16:03:55 +02002986
Sidath Senanayake72f24572020-10-27 11:38:49 +00002987#if !MALI_USE_CSF
Sidath Senanayake823a7602016-06-29 16:03:55 +02002988/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08002989 * soft_job_timeout_store - Store callback for the soft_job_timeout sysfs
Sidath Senanayake823a7602016-06-29 16:03:55 +02002990 * file.
2991 *
2992 * @dev: The device this sysfs file is for.
2993 * @attr: The attributes of the sysfs file.
2994 * @buf: The value written to the sysfs file.
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01002995 * @count: The number of bytes to write to the sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02002996 *
Sidath Senanayake192bd792016-11-09 14:14:45 +01002997 * This allows setting the timeout for software jobs. Waiting soft event wait
2998 * jobs will be cancelled after this period expires, while soft fence wait jobs
2999 * will print debug information if the fence debug feature is enabled.
3000 *
3001 * This is expressed in milliseconds.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003002 *
3003 * Return: count if the function succeeded. An error code on failure.
3004 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003005static ssize_t soft_job_timeout_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003006 struct device_attribute *attr,
3007 const char *buf, size_t count)
3008{
3009 struct kbase_device *kbdev;
Sidath Senanayake192bd792016-11-09 14:14:45 +01003010 int soft_job_timeout_ms;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003011
3012 kbdev = to_kbase_device(dev);
3013 if (!kbdev)
3014 return -ENODEV;
3015
Sidath Senanayake192bd792016-11-09 14:14:45 +01003016 if ((kstrtoint(buf, 0, &soft_job_timeout_ms) != 0) ||
3017 (soft_job_timeout_ms <= 0))
Sidath Senanayake823a7602016-06-29 16:03:55 +02003018 return -EINVAL;
3019
Sidath Senanayake192bd792016-11-09 14:14:45 +01003020 atomic_set(&kbdev->js_data.soft_job_timeout_ms,
3021 soft_job_timeout_ms);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003022
3023 return count;
3024}
3025
3026/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003027 * soft_job_timeout_show - Show callback for the soft_job_timeout sysfs
Sidath Senanayake823a7602016-06-29 16:03:55 +02003028 * file.
3029 *
Sidath Senanayake823a7602016-06-29 16:03:55 +02003030 * @dev: The device this sysfs file is for.
3031 * @attr: The attributes of the sysfs file.
3032 * @buf: The output buffer for the sysfs file contents.
3033 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003034 * This will return the timeout for the software jobs.
3035 *
Sidath Senanayake823a7602016-06-29 16:03:55 +02003036 * Return: The number of bytes output to buf.
3037 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003038static ssize_t soft_job_timeout_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003039 struct device_attribute *attr,
3040 char * const buf)
3041{
3042 struct kbase_device *kbdev;
3043
3044 kbdev = to_kbase_device(dev);
3045 if (!kbdev)
3046 return -ENODEV;
3047
3048 return scnprintf(buf, PAGE_SIZE, "%i\n",
Sidath Senanayake192bd792016-11-09 14:14:45 +01003049 atomic_read(&kbdev->js_data.soft_job_timeout_ms));
Sidath Senanayake823a7602016-06-29 16:03:55 +02003050}
3051
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003052static DEVICE_ATTR_RW(soft_job_timeout);
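/*
 * Example usage (value is illustrative; the timeout is expressed in
 * milliseconds and must be greater than zero):
 *
 *   $ echo 3000 > soft_job_timeout
 *   $ cat soft_job_timeout
 *   3000
 */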
Sidath Senanayake192bd792016-11-09 14:14:45 +01003053
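/**
 * timeout_ms_to_ticks - Convert a timeout in milliseconds to scheduling ticks
 * @kbdev: The kbase device the timeout applies to
 * @timeout_ms: Timeout in milliseconds; 0 keeps the old value and a negative
 *              value selects the default
 * @default_ticks: Tick count to use when @timeout_ms is negative
 * @old_ticks: Tick count to keep when @timeout_ms is zero
 *
 * Return: The corresponding number of scheduling ticks (at least 1 when a
 *         positive @timeout_ms is converted).
 */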
3054static u32 timeout_ms_to_ticks(struct kbase_device *kbdev, long timeout_ms,
3055 int default_ticks, u32 old_ticks)
3056{
3057 if (timeout_ms > 0) {
3058 u64 ticks = timeout_ms * 1000000ULL;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003059
Sidath Senanayake192bd792016-11-09 14:14:45 +01003060 do_div(ticks, kbdev->js_data.scheduling_period_ns);
3061 if (!ticks)
3062 return 1;
3063 return ticks;
3064 } else if (timeout_ms < 0) {
3065 return default_ticks;
3066 } else {
3067 return old_ticks;
3068 }
3069}
Sidath Senanayake823a7602016-06-29 16:03:55 +02003070
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003071/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003072 * js_timeouts_store - Store callback for the js_timeouts sysfs file.
3073 *
3074 * @dev: The device this sysfs file is for
3075 * @attr: The attributes of the sysfs file
3076 * @buf: The value written to the sysfs file
3077 * @count: The number of bytes to write to the sysfs file
Sidath Senanayake823a7602016-06-29 16:03:55 +02003078 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003079 * This function is called when the js_timeouts sysfs file is written to. The
Sidath Senanayake823a7602016-06-29 16:03:55 +02003080 * file accepts eight values separated by whitespace, corresponding to the
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003081 * %JS_SOFT_STOP_TICKS, %JS_SOFT_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_SS,
3082 * %JS_HARD_STOP_TICKS_CL, %JS_HARD_STOP_TICKS_DUMPING, %JS_RESET_TICKS_SS,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003083 * %JS_RESET_TICKS_CL and %JS_RESET_TICKS_DUMPING configuration values (in that
3084 * order), with the difference that the js_timeouts values are expressed in
3085 * MILLISECONDS.
3086 *
3087 * The js_timeouts sysfs file allows the values currently in use by the job
3088 * scheduler to be overridden: write 0 for no change, or -1 to restore a default.
3089 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003090 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003091 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003092static ssize_t js_timeouts_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003093{
3094 struct kbase_device *kbdev;
3095 int items;
3096 long js_soft_stop_ms;
3097 long js_soft_stop_ms_cl;
3098 long js_hard_stop_ms_ss;
3099 long js_hard_stop_ms_cl;
3100 long js_hard_stop_ms_dumping;
3101 long js_reset_ms_ss;
3102 long js_reset_ms_cl;
3103 long js_reset_ms_dumping;
3104
3105 kbdev = to_kbase_device(dev);
3106 if (!kbdev)
3107 return -ENODEV;
3108
3109 items = sscanf(buf, "%ld %ld %ld %ld %ld %ld %ld %ld",
3110 &js_soft_stop_ms, &js_soft_stop_ms_cl,
3111 &js_hard_stop_ms_ss, &js_hard_stop_ms_cl,
3112 &js_hard_stop_ms_dumping, &js_reset_ms_ss,
3113 &js_reset_ms_cl, &js_reset_ms_dumping);
3114
3115 if (items == 8) {
Sidath Senanayake192bd792016-11-09 14:14:45 +01003116 struct kbasep_js_device_data *js_data = &kbdev->js_data;
3117 unsigned long flags;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003118
Sidath Senanayake92327782016-11-09 14:53:08 +01003119 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003120
Sidath Senanayake192bd792016-11-09 14:14:45 +01003121#define UPDATE_TIMEOUT(ticks_name, ms_name, default) do {\
3122 js_data->ticks_name = timeout_ms_to_ticks(kbdev, ms_name, \
3123 default, js_data->ticks_name); \
3124 dev_dbg(kbdev->dev, "Overriding " #ticks_name \
3125 " with %lu ticks (%lu ms)\n", \
3126 (unsigned long)js_data->ticks_name, \
3127 ms_name); \
3128 } while (0)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003129
Sidath Senanayake192bd792016-11-09 14:14:45 +01003130 UPDATE_TIMEOUT(soft_stop_ticks, js_soft_stop_ms,
3131 DEFAULT_JS_SOFT_STOP_TICKS);
3132 UPDATE_TIMEOUT(soft_stop_ticks_cl, js_soft_stop_ms_cl,
3133 DEFAULT_JS_SOFT_STOP_TICKS_CL);
3134 UPDATE_TIMEOUT(hard_stop_ticks_ss, js_hard_stop_ms_ss,
Sidath Senanayake192bd792016-11-09 14:14:45 +01003135 DEFAULT_JS_HARD_STOP_TICKS_SS);
3136 UPDATE_TIMEOUT(hard_stop_ticks_cl, js_hard_stop_ms_cl,
3137 DEFAULT_JS_HARD_STOP_TICKS_CL);
3138 UPDATE_TIMEOUT(hard_stop_ticks_dumping,
3139 js_hard_stop_ms_dumping,
3140 DEFAULT_JS_HARD_STOP_TICKS_DUMPING);
3141 UPDATE_TIMEOUT(gpu_reset_ticks_ss, js_reset_ms_ss,
Sidath Senanayake192bd792016-11-09 14:14:45 +01003142 DEFAULT_JS_RESET_TICKS_SS);
3143 UPDATE_TIMEOUT(gpu_reset_ticks_cl, js_reset_ms_cl,
3144 DEFAULT_JS_RESET_TICKS_CL);
3145 UPDATE_TIMEOUT(gpu_reset_ticks_dumping, js_reset_ms_dumping,
3146 DEFAULT_JS_RESET_TICKS_DUMPING);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003147
Sidath Senanayake192bd792016-11-09 14:14:45 +01003148 kbase_js_set_timeouts(kbdev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003149
Sidath Senanayake92327782016-11-09 14:53:08 +01003150 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003151
3152 return count;
3153 }
3154
3155 dev_err(kbdev->dev, "Couldn't process js_timeouts write operation.\n"
3156 "Use format <soft_stop_ms> <soft_stop_ms_cl> <hard_stop_ms_ss> <hard_stop_ms_cl> <hard_stop_ms_dumping> <reset_ms_ss> <reset_ms_cl> <reset_ms_dumping>\n"
3157 "Write 0 for no change, -1 to restore default timeout\n");
3158 return -EINVAL;
3159}
3160
Sidath Senanayake192bd792016-11-09 14:14:45 +01003161static unsigned long get_js_timeout_in_ms(
3162 u32 scheduling_period_ns,
3163 u32 ticks)
3164{
3165 u64 ms = (u64)ticks * scheduling_period_ns;
3166
3167 do_div(ms, 1000000UL);
3168 return ms;
3169}
3170
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003171/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003172 * js_timeouts_show - Show callback for the js_timeouts sysfs file.
3173 *
3174 * @dev: The device this sysfs file is for
3175 * @attr: The attributes of the sysfs file
3176 * @buf: The output buffer for the sysfs file contents
Sidath Senanayake823a7602016-06-29 16:03:55 +02003177 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003178 * This function is called to get the contents of the js_timeouts sysfs
Sidath Senanayake823a7602016-06-29 16:03:55 +02003179 * file. It returns the values last written to the js_timeouts sysfs file.
3180 * If the file has not been written to yet, the values reported are the
3181 * settings currently in use.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003182 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003183 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003184 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003185static ssize_t js_timeouts_show(struct device *dev, struct device_attribute *attr, char * const buf)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003186{
3187 struct kbase_device *kbdev;
3188 ssize_t ret;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003189 unsigned long js_soft_stop_ms;
3190 unsigned long js_soft_stop_ms_cl;
3191 unsigned long js_hard_stop_ms_ss;
3192 unsigned long js_hard_stop_ms_cl;
3193 unsigned long js_hard_stop_ms_dumping;
3194 unsigned long js_reset_ms_ss;
3195 unsigned long js_reset_ms_cl;
3196 unsigned long js_reset_ms_dumping;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003197 u32 scheduling_period_ns;
3198
3199 kbdev = to_kbase_device(dev);
3200 if (!kbdev)
3201 return -ENODEV;
3202
Sidath Senanayake192bd792016-11-09 14:14:45 +01003203 scheduling_period_ns = kbdev->js_data.scheduling_period_ns;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003204
Sidath Senanayake192bd792016-11-09 14:14:45 +01003205#define GET_TIMEOUT(name) get_js_timeout_in_ms(\
3206 scheduling_period_ns, \
3207 kbdev->js_data.name)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003208
Sidath Senanayake192bd792016-11-09 14:14:45 +01003209 js_soft_stop_ms = GET_TIMEOUT(soft_stop_ticks);
3210 js_soft_stop_ms_cl = GET_TIMEOUT(soft_stop_ticks_cl);
3211 js_hard_stop_ms_ss = GET_TIMEOUT(hard_stop_ticks_ss);
3212 js_hard_stop_ms_cl = GET_TIMEOUT(hard_stop_ticks_cl);
3213 js_hard_stop_ms_dumping = GET_TIMEOUT(hard_stop_ticks_dumping);
3214 js_reset_ms_ss = GET_TIMEOUT(gpu_reset_ticks_ss);
3215 js_reset_ms_cl = GET_TIMEOUT(gpu_reset_ticks_cl);
3216 js_reset_ms_dumping = GET_TIMEOUT(gpu_reset_ticks_dumping);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003217
Sidath Senanayake192bd792016-11-09 14:14:45 +01003218#undef GET_TIMEOUT
Sidath Senanayake823a7602016-06-29 16:03:55 +02003219
3220 ret = scnprintf(buf, PAGE_SIZE, "%lu %lu %lu %lu %lu %lu %lu %lu\n",
3221 js_soft_stop_ms, js_soft_stop_ms_cl,
3222 js_hard_stop_ms_ss, js_hard_stop_ms_cl,
3223 js_hard_stop_ms_dumping, js_reset_ms_ss,
3224 js_reset_ms_cl, js_reset_ms_dumping);
3225
3226 if (ret >= PAGE_SIZE) {
3227 buf[PAGE_SIZE - 2] = '\n';
3228 buf[PAGE_SIZE - 1] = '\0';
3229 ret = PAGE_SIZE - 1;
3230 }
3231
3232 return ret;
3233}
3234
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003235/*
3236 * The sysfs file js_timeouts.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003237 *
3238 * This is used to override the current job scheduler values for
3239 * JS_SOFT_STOP_TICKS
3240 * JS_SOFT_STOP_TICKS_CL
3241 * JS_HARD_STOP_TICKS_SS
3242 * JS_HARD_STOP_TICKS_CL
3243 * JS_HARD_STOP_TICKS_DUMPING
3244 * JS_RESET_TICKS_SS
3245 * JS_RESET_TICKS_CL
3246 * JS_RESET_TICKS_DUMPING.
3247 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003248static DEVICE_ATTR_RW(js_timeouts);
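/*
 * Example usage (values are illustrative, all in milliseconds; write 0 to
 * leave a field unchanged and -1 to restore its default):
 *
 *   $ echo 500 500 2000 2000 6000 3000 3000 8000 > js_timeouts
 *   $ echo 0 0 0 0 0 -1 -1 -1 > js_timeouts
 */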
Sidath Senanayake823a7602016-06-29 16:03:55 +02003249
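/**
 * get_new_js_timeout - Rescale a tick count for a new scheduling period
 * @old_period: The previous scheduling period, in nanoseconds
 * @old_ticks: The timeout expressed in ticks of the previous period
 * @new_scheduling_period_ns: The new scheduling period, in nanoseconds
 *
 * Return: The timeout re-expressed in ticks of the new period (minimum 1),
 *         so that the absolute timeout stays roughly the same.
 */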
Sidath Senanayake192bd792016-11-09 14:14:45 +01003250static u32 get_new_js_timeout(
3251 u32 old_period,
3252 u32 old_ticks,
3253 u32 new_scheduling_period_ns)
3254{
3255 u64 ticks = (u64)old_period * (u64)old_ticks;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003256
Sidath Senanayake192bd792016-11-09 14:14:45 +01003257 do_div(ticks, new_scheduling_period_ns);
3258	return ticks ? ticks : 1;
3259}
3260
Sidath Senanayake823a7602016-06-29 16:03:55 +02003261/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003262 * js_scheduling_period_store - Store callback for the js_scheduling_period sysfs
Sidath Senanayake823a7602016-06-29 16:03:55 +02003263 * file
3264 * @dev: The device the sysfs file is for
3265 * @attr: The attributes of the sysfs file
3266 * @buf: The value written to the sysfs file
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01003267 * @count: The number of bytes to write to the sysfs file
Sidath Senanayake823a7602016-06-29 16:03:55 +02003268 *
3269 * This function is called when the js_scheduling_period sysfs file is written
3270 * to. It checks the data written, and if valid updates the js_scheduling_period
3271 * value
3272 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003273 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003274 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003275static ssize_t js_scheduling_period_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003276 struct device_attribute *attr, const char *buf, size_t count)
3277{
3278 struct kbase_device *kbdev;
3279 int ret;
3280 unsigned int js_scheduling_period;
3281 u32 new_scheduling_period_ns;
3282 u32 old_period;
Sidath Senanayake192bd792016-11-09 14:14:45 +01003283 struct kbasep_js_device_data *js_data;
3284 unsigned long flags;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003285
3286 kbdev = to_kbase_device(dev);
3287 if (!kbdev)
3288 return -ENODEV;
3289
Sidath Senanayake192bd792016-11-09 14:14:45 +01003290 js_data = &kbdev->js_data;
3291
Sidath Senanayake823a7602016-06-29 16:03:55 +02003292 ret = kstrtouint(buf, 0, &js_scheduling_period);
3293 if (ret || !js_scheduling_period) {
3294 dev_err(kbdev->dev, "Couldn't process js_scheduling_period write operation.\n"
3295 "Use format <js_scheduling_period_ms>\n");
3296 return -EINVAL;
3297 }
3298
3299 new_scheduling_period_ns = js_scheduling_period * 1000000;
3300
3301 /* Update scheduling timeouts */
Sidath Senanayake192bd792016-11-09 14:14:45 +01003302 mutex_lock(&js_data->runpool_mutex);
Sidath Senanayake92327782016-11-09 14:53:08 +01003303 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003304
3305 /* If no contexts have been scheduled since js_timeouts was last written
3306 * to, the new timeouts might not have been latched yet. So check if an
Sidath Senanayake97483052021-01-29 15:03:53 +00003307 * update is pending and use the new values if necessary.
3308 */
Sidath Senanayake823a7602016-06-29 16:03:55 +02003309
3310 /* Use previous 'new' scheduling period as a base if present. */
Sidath Senanayake192bd792016-11-09 14:14:45 +01003311 old_period = js_data->scheduling_period_ns;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003312
Sidath Senanayake192bd792016-11-09 14:14:45 +01003313#define SET_TIMEOUT(name) \
3314 (js_data->name = get_new_js_timeout(\
3315 old_period, \
3316 kbdev->js_data.name, \
3317 new_scheduling_period_ns))
Sidath Senanayake823a7602016-06-29 16:03:55 +02003318
Sidath Senanayake192bd792016-11-09 14:14:45 +01003319 SET_TIMEOUT(soft_stop_ticks);
3320 SET_TIMEOUT(soft_stop_ticks_cl);
3321 SET_TIMEOUT(hard_stop_ticks_ss);
3322 SET_TIMEOUT(hard_stop_ticks_cl);
3323 SET_TIMEOUT(hard_stop_ticks_dumping);
3324 SET_TIMEOUT(gpu_reset_ticks_ss);
3325 SET_TIMEOUT(gpu_reset_ticks_cl);
3326 SET_TIMEOUT(gpu_reset_ticks_dumping);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003327
Sidath Senanayake192bd792016-11-09 14:14:45 +01003328#undef SET_TIMEOUT
Sidath Senanayake823a7602016-06-29 16:03:55 +02003329
Sidath Senanayake192bd792016-11-09 14:14:45 +01003330 js_data->scheduling_period_ns = new_scheduling_period_ns;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003331
Sidath Senanayake192bd792016-11-09 14:14:45 +01003332 kbase_js_set_timeouts(kbdev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003333
Sidath Senanayake92327782016-11-09 14:53:08 +01003334 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
Sidath Senanayake192bd792016-11-09 14:14:45 +01003335 mutex_unlock(&js_data->runpool_mutex);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003336
3337 dev_dbg(kbdev->dev, "JS scheduling period: %dms\n",
3338 js_scheduling_period);
3339
3340 return count;
3341}
3342
3343/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003344 * js_scheduling_period_show - Show callback for the js_scheduling_period sysfs
Sidath Senanayake823a7602016-06-29 16:03:55 +02003345 * entry.
3346 * @dev: The device this sysfs file is for.
3347 * @attr: The attributes of the sysfs file.
3348 * @buf: The output buffer for the sysfs file contents.
3349 *
3350 * This function is called to get the current period used for the JS scheduling
3351 * period.
3352 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003353 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003354 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003355static ssize_t js_scheduling_period_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003356 struct device_attribute *attr, char * const buf)
3357{
3358 struct kbase_device *kbdev;
3359 u32 period;
3360 ssize_t ret;
3361
3362 kbdev = to_kbase_device(dev);
3363 if (!kbdev)
3364 return -ENODEV;
3365
Sidath Senanayake192bd792016-11-09 14:14:45 +01003366 period = kbdev->js_data.scheduling_period_ns;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003367
3368 ret = scnprintf(buf, PAGE_SIZE, "%d\n",
3369 period / 1000000);
3370
3371 return ret;
3372}
3373
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003374static DEVICE_ATTR_RW(js_scheduling_period);
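/*
 * Example usage (value is illustrative and expressed in milliseconds):
 *
 *   $ echo 100 > js_scheduling_period
 *   $ cat js_scheduling_period
 *   100
 */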
Sidath Senanayake823a7602016-06-29 16:03:55 +02003375
Sidath Senanayake823a7602016-06-29 16:03:55 +02003376
3377#ifdef CONFIG_MALI_DEBUG
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003378static ssize_t js_softstop_always_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003379 struct device_attribute *attr, const char *buf, size_t count)
3380{
3381 struct kbase_device *kbdev;
3382 int ret;
3383 int softstop_always;
3384
3385 kbdev = to_kbase_device(dev);
3386 if (!kbdev)
3387 return -ENODEV;
3388
3389 ret = kstrtoint(buf, 0, &softstop_always);
3390 if (ret || ((softstop_always != 0) && (softstop_always != 1))) {
3391 dev_err(kbdev->dev, "Couldn't process js_softstop_always write operation.\n"
3392 "Use format <soft_stop_always>\n");
3393 return -EINVAL;
3394 }
3395
3396 kbdev->js_data.softstop_always = (bool) softstop_always;
3397 dev_dbg(kbdev->dev, "Support for softstop on a single context: %s\n",
3398 (kbdev->js_data.softstop_always) ?
3399 "Enabled" : "Disabled");
3400 return count;
3401}
3402
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003403static ssize_t js_softstop_always_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003404 struct device_attribute *attr, char * const buf)
3405{
3406 struct kbase_device *kbdev;
3407 ssize_t ret;
3408
3409 kbdev = to_kbase_device(dev);
3410 if (!kbdev)
3411 return -ENODEV;
3412
3413 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->js_data.softstop_always);
3414
3415 if (ret >= PAGE_SIZE) {
3416 buf[PAGE_SIZE - 2] = '\n';
3417 buf[PAGE_SIZE - 1] = '\0';
3418 ret = PAGE_SIZE - 1;
3419 }
3420
3421 return ret;
3422}
3423
3424/*
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003425 * By default, soft-stops are disabled when only a single context is present.
3426 * The ability to enable soft-stop when only a single context is present can be
3427 * used for debug and unit-testing purposes.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003428 * (see CL t6xx_stress_1 unit-test as an example whereby this feature is used.)
3429 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003430static DEVICE_ATTR_RW(js_softstop_always);
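/*
 * Example usage (debug builds only; the value must be 0 or 1):
 *
 *   $ echo 1 > js_softstop_always
 *   $ cat js_softstop_always
 *   1
 */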
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01003431#endif /* CONFIG_MALI_DEBUG */
Sidath Senanayake72f24572020-10-27 11:38:49 +00003432#endif /* !MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02003433
3434#ifdef CONFIG_MALI_DEBUG
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01003435typedef void kbasep_debug_command_func(struct kbase_device *);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003436
3437enum kbasep_debug_command_code {
3438 KBASEP_DEBUG_COMMAND_DUMPTRACE,
3439
3440 /* This must be the last enum */
3441 KBASEP_DEBUG_COMMAND_COUNT
3442};
3443
3444struct kbasep_debug_command {
3445 char *str;
3446 kbasep_debug_command_func *func;
3447};
3448
Sidath Senanayakebbbb1cf2021-09-09 14:10:15 +01003449static void kbasep_ktrace_dump_wrapper(struct kbase_device *kbdev)
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02003450{
3451 KBASE_KTRACE_DUMP(kbdev);
3452}
3453
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003454/* Debug commands supported by the driver */
Sidath Senanayake823a7602016-06-29 16:03:55 +02003455static const struct kbasep_debug_command debug_commands[] = {
3456 {
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003457 .str = "dumptrace",
3458 .func = &kbasep_ktrace_dump_wrapper,
3459 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02003460};
3461
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003462/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003463 * debug_command_show - Show callback for the debug_command sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003464 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003465 * @dev: The device this sysfs file is for
3466 * @attr: The attributes of the sysfs file
3467 * @buf: The output buffer for the sysfs file contents
Sidath Senanayake823a7602016-06-29 16:03:55 +02003468 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003469 * This function is called to get the contents of the debug_command sysfs
3470 * file. This is a list of the available debug commands, separated by newlines.
3471 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003472 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003473 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003474static ssize_t debug_command_show(struct device *dev, struct device_attribute *attr, char * const buf)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003475{
3476 struct kbase_device *kbdev;
3477 int i;
3478 ssize_t ret = 0;
3479
3480 kbdev = to_kbase_device(dev);
3481
3482 if (!kbdev)
3483 return -ENODEV;
3484
3485 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT && ret < PAGE_SIZE; i++)
3486 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s\n", debug_commands[i].str);
3487
3488 if (ret >= PAGE_SIZE) {
3489 buf[PAGE_SIZE - 2] = '\n';
3490 buf[PAGE_SIZE - 1] = '\0';
3491 ret = PAGE_SIZE - 1;
3492 }
3493
3494 return ret;
3495}
3496
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003497/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003498 * debug_command_store - Store callback for the debug_command sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003499 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003500 * @dev: The device this sysfs file is for
3501 * @attr: The attributes of the sysfs file
3502 * @buf: The value written to the sysfs file
Sidath Senanayake72f24572020-10-27 11:38:49 +00003503 * @count: The number of bytes written to the sysfs file
Sidath Senanayake823a7602016-06-29 16:03:55 +02003504 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003505 * This function is called when the debug_command sysfs file is written to.
3506 * It matches the requested command against the available commands, and if
3507 * a matching command is found calls the associated function from
3508 * @debug_commands to issue the command.
3509 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003510 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003511 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003512static ssize_t debug_command_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
Sidath Senanayake823a7602016-06-29 16:03:55 +02003513{
3514 struct kbase_device *kbdev;
3515 int i;
3516
3517 kbdev = to_kbase_device(dev);
3518
3519 if (!kbdev)
3520 return -ENODEV;
3521
3522 for (i = 0; i < KBASEP_DEBUG_COMMAND_COUNT; i++) {
3523 if (sysfs_streq(debug_commands[i].str, buf)) {
3524 debug_commands[i].func(kbdev);
3525 return count;
3526 }
3527 }
3528
3529 /* Debug Command not found */
3530 dev_err(dev, "debug_command: command not known\n");
3531 return -EINVAL;
3532}
3533
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003534/* The sysfs file debug_command.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003535 *
3536 * This is used to issue general debug commands to the device driver.
3537 * Reading it will produce a list of debug commands, separated by newlines.
3538 * Writing to it with one of those commands will issue said command.
3539 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003540static DEVICE_ATTR_RW(debug_command);
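/*
 * Example usage ("dumptrace" is currently the only supported command):
 *
 *   $ cat debug_command
 *   dumptrace
 *   $ echo dumptrace > debug_command
 */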
Sidath Senanayake823a7602016-06-29 16:03:55 +02003541#endif /* CONFIG_MALI_DEBUG */
3542
3543/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003544 * gpuinfo_show - Show callback for the gpuinfo sysfs entry.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003545 * @dev: The device this sysfs file is for.
3546 * @attr: The attributes of the sysfs file.
3547 * @buf: The output buffer to receive the GPU information.
3548 *
3549 * This function is called to get a description of the present Mali
3550 * GPU via the gpuinfo sysfs entry. This includes the GPU family, the
3551 * number of cores, the hardware version and the raw product id. For
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003552 * example
Sidath Senanayake823a7602016-06-29 16:03:55 +02003553 *
3554 * Mali-T60x 4 cores r0p0 0x6956
3555 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003556 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003557 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003558static ssize_t gpuinfo_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003559 struct device_attribute *attr, char *buf)
3560{
3561 static const struct gpu_product_id_name {
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003562 unsigned int id;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003563 char *name;
3564 } gpu_product_id_names[] = {
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003565 { .id = GPU_ID2_PRODUCT_TMIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003566 .name = "Mali-G71" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003567 { .id = GPU_ID2_PRODUCT_THEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02003568 .name = "Mali-G72" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003569 { .id = GPU_ID2_PRODUCT_TSIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003570 .name = "Mali-G51" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003571 { .id = GPU_ID2_PRODUCT_TNOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02003572 .name = "Mali-G76" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003573 { .id = GPU_ID2_PRODUCT_TDVX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake3fe808a2018-04-27 13:23:04 +02003574 .name = "Mali-G31" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003575 { .id = GPU_ID2_PRODUCT_TGOX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake3fe808a2018-04-27 13:23:04 +02003576 .name = "Mali-G52" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003577 { .id = GPU_ID2_PRODUCT_TTRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake228451e2019-06-27 14:37:54 +02003578 .name = "Mali-G77" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003579 { .id = GPU_ID2_PRODUCT_TBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02003580 .name = "Mali-G78" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003581 { .id = GPU_ID2_PRODUCT_TBAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayakefca86132021-06-15 13:39:30 +01003582 .name = "Mali-G78AE" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003583 { .id = GPU_ID2_PRODUCT_LBEX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02003584 .name = "Mali-G68" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003585 { .id = GPU_ID2_PRODUCT_TNAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01003586 .name = "Mali-G57" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003587 { .id = GPU_ID2_PRODUCT_TODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake52c5bf52021-07-19 14:38:02 +01003588 .name = "Mali-G710" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003589 { .id = GPU_ID2_PRODUCT_LODX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake52c5bf52021-07-19 14:38:02 +01003590 .name = "Mali-G610" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003591 { .id = GPU_ID2_PRODUCT_TGRX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake52c5bf52021-07-19 14:38:02 +01003592 .name = "Mali-G510" },
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003593 { .id = GPU_ID2_PRODUCT_TVAX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Sidath Senanayake52c5bf52021-07-19 14:38:02 +01003594 .name = "Mali-G310" },
Debarshi Dutta20fff722023-06-02 13:36:22 +00003595 { .id = GPU_ID2_PRODUCT_LTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT,
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003596 .name = "Mali-G620" },
Sidath Senanayake823a7602016-06-29 16:03:55 +02003597 };
3598 const char *product_name = "(Unknown Mali GPU)";
3599 struct kbase_device *kbdev;
3600 u32 gpu_id;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003601 unsigned int product_id, product_id_mask;
3602 unsigned int i;
Jack Divere19249e2022-11-07 12:13:47 +00003603 struct kbase_gpu_props *gpu_props;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003604
3605 kbdev = to_kbase_device(dev);
3606 if (!kbdev)
3607 return -ENODEV;
3608
Jack Divere19249e2022-11-07 12:13:47 +00003609 gpu_props = &kbdev->gpu_props;
3610 gpu_id = gpu_props->props.raw_props.gpu_id;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003611 product_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
3612 product_id_mask = GPU_ID2_PRODUCT_MODEL >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003613
3614 for (i = 0; i < ARRAY_SIZE(gpu_product_id_names); ++i) {
3615 const struct gpu_product_id_name *p = &gpu_product_id_names[i];
3616
Sidath Senanayake86966062019-08-23 15:40:27 +02003617 if ((p->id & product_id_mask) ==
Sidath Senanayake823a7602016-06-29 16:03:55 +02003618 (product_id & product_id_mask)) {
3619 product_name = p->name;
3620 break;
3621 }
3622 }
3623
Jack Divere19249e2022-11-07 12:13:47 +00003624#if MALI_USE_CSF
3625 if ((product_id & product_id_mask) ==
3626 ((GPU_ID2_PRODUCT_TTUX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) & product_id_mask)) {
3627 const bool rt_supported =
3628 GPU_FEATURES_RAY_TRACING_GET(gpu_props->props.raw_props.gpu_features);
3629 const u8 nr_cores = gpu_props->num_cores;
3630
3631		/* Mali-G715-Immortalis if number of cores > 10 and ray tracing is supported.
3632		 * Mali-G715 if number of cores > 10 without ray tracing support.
3633		 * Mali-G715 if 7 <= number of cores <= 10, regardless of ray tracing support.
3634 * Mali-G615 if number of cores < 7.
3635 */
3636 if ((nr_cores > 10) && rt_supported)
3637 product_name = "Mali-G715-Immortalis";
3638 else if (nr_cores >= 7)
3639 product_name = "Mali-G715";
3640
3641 if (nr_cores < 7) {
3642 dev_warn(kbdev->dev, "nr_cores(%u) GPU ID must be G615", nr_cores);
3643 product_name = "Mali-G615";
3644 } else
3645 dev_dbg(kbdev->dev, "GPU ID_Name: %s, nr_cores(%u)\n", product_name,
3646 nr_cores);
3647 }
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003648
3649 if ((product_id & product_id_mask) ==
3650 ((GPU_ID2_PRODUCT_TTIX >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT) & product_id_mask)) {
3651 const bool rt_supported =
3652 GPU_FEATURES_RAY_TRACING_GET(gpu_props->props.raw_props.gpu_features);
3653 const u8 nr_cores = gpu_props->num_cores;
3654
Jörg Wagnere61eb932023-08-31 17:27:24 +00003655 if ((nr_cores >= 10) && rt_supported)
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003656 product_name = "Mali-G720-Immortalis";
3657 else
3658 product_name = (nr_cores >= 6) ? "Mali-G720" : "Mali-G620";
3659
3660 dev_dbg(kbdev->dev, "GPU ID_Name: %s (ID: 0x%x), nr_cores(%u)\n", product_name,
3661			product_id & product_id_mask, nr_cores);
3662 }
Jack Divere19249e2022-11-07 12:13:47 +00003663#endif /* MALI_USE_CSF */
3664
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003665 return scnprintf(buf, PAGE_SIZE, "%s %d cores r%dp%d 0x%04X\n", product_name,
3666 kbdev->gpu_props.num_cores,
3667 (gpu_id & GPU_ID_VERSION_MAJOR) >> KBASE_GPU_ID_VERSION_MAJOR_SHIFT,
3668 (gpu_id & GPU_ID_VERSION_MINOR) >> KBASE_GPU_ID_VERSION_MINOR_SHIFT,
3669 product_id);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003670}
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003671static DEVICE_ATTR_RO(gpuinfo);
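/*
 * Example usage (the output below is only a sketch of the format produced by
 * gpuinfo_show; actual values depend on the GPU present):
 *
 *   $ cat gpuinfo
 *   <product name> <n> cores r<major>p<minor> 0x<product id>
 */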
Sidath Senanayake823a7602016-06-29 16:03:55 +02003672
3673/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003674 * dvfs_period_store - Store callback for the dvfs_period sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003675 * @dev: The device this sysfs file is for
3676 * @attr: The attributes of the sysfs file
3677 * @buf: The value written to the sysfs file
3678 * @count: The number of bytes written to the sysfs file
3679 *
3680 * This function is called when the dvfs_period sysfs file is written to. It
3681 * checks the data written, and if valid updates the DVFS period variable.
3682 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003683 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003684 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003685static ssize_t dvfs_period_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003686 struct device_attribute *attr, const char *buf, size_t count)
3687{
3688 struct kbase_device *kbdev;
3689 int ret;
3690 int dvfs_period;
3691
3692 kbdev = to_kbase_device(dev);
3693 if (!kbdev)
3694 return -ENODEV;
3695
3696 ret = kstrtoint(buf, 0, &dvfs_period);
3697 if (ret || dvfs_period <= 0) {
3698 dev_err(kbdev->dev, "Couldn't process dvfs_period write operation.\n"
3699 "Use format <dvfs_period_ms>\n");
3700 return -EINVAL;
3701 }
3702
3703 kbdev->pm.dvfs_period = dvfs_period;
3704 dev_dbg(kbdev->dev, "DVFS period: %dms\n", dvfs_period);
3705
3706 return count;
3707}
3708
3709/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003710 * dvfs_period_show - Show callback for the dvfs_period sysfs entry.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003711 * @dev: The device this sysfs file is for.
3712 * @attr: The attributes of the sysfs file.
3713 * @buf: The output buffer for the sysfs file contents.
3714 *
3715 * This function is called to get the current period used for the DVFS sample
3716 * timer.
3717 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003718 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003719 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003720static ssize_t dvfs_period_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003721 struct device_attribute *attr, char * const buf)
3722{
3723 struct kbase_device *kbdev;
3724 ssize_t ret;
3725
3726 kbdev = to_kbase_device(dev);
3727 if (!kbdev)
3728 return -ENODEV;
3729
3730 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->pm.dvfs_period);
3731
3732 return ret;
3733}
3734
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003735static DEVICE_ATTR_RW(dvfs_period);
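/*
 * Example usage (value is illustrative and expressed in milliseconds):
 *
 *   $ echo 100 > dvfs_period
 *   $ cat dvfs_period
 *   100
 */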
Sidath Senanayake823a7602016-06-29 16:03:55 +02003736
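/**
 * kbase_pm_gpu_freq_init - Determine the GPU frequencies used for timeout scaling
 * @kbdev: Pointer to the kbase device
 *
 * Picks the lowest frequency from the OPP table if one is present in the
 * device tree, falling back to DEFAULT_REF_TIMEOUT_FREQ_KHZ otherwise, then
 * populates the maximum frequency.
 *
 * Return: 0 on success, -1 if the maximum frequency could not be populated.
 */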
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003737int kbase_pm_gpu_freq_init(struct kbase_device *kbdev)
Jack Divere19249e2022-11-07 12:13:47 +00003738{
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003739 int err;
Jack Divere19249e2022-11-07 12:13:47 +00003740 /* Uses default reference frequency defined in below macro */
3741 u64 lowest_freq_khz = DEFAULT_REF_TIMEOUT_FREQ_KHZ;
3742
3743 /* Only check lowest frequency in cases when OPPs are used and
3744 * present in the device tree.
3745 */
3746#ifdef CONFIG_PM_OPP
3747 struct dev_pm_opp *opp_ptr;
3748 unsigned long found_freq = 0;
3749
3750 /* find lowest frequency OPP */
3751 opp_ptr = dev_pm_opp_find_freq_ceil(kbdev->dev, &found_freq);
3752 if (IS_ERR(opp_ptr)) {
3753 dev_err(kbdev->dev, "No OPPs found in device tree! Scaling timeouts using %llu kHz",
3754 (unsigned long long)lowest_freq_khz);
3755 } else {
3756#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
3757 dev_pm_opp_put(opp_ptr); /* decrease OPP refcount */
3758#endif
3759 /* convert found frequency to KHz */
3760 found_freq /= 1000;
3761
3762 /* If lowest frequency in OPP table is still higher
3763 * than the reference, then keep the reference frequency
3764		 * as the one to use for scaling.
3765 */
3766 if (found_freq < lowest_freq_khz)
3767 lowest_freq_khz = found_freq;
3768 }
3769#else
3770 dev_err(kbdev->dev, "No operating-points-v2 node or operating-points property in DT");
3771#endif
3772
3773 kbdev->lowest_gpu_freq_khz = lowest_freq_khz;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003774
3775 err = kbase_device_populate_max_freq(kbdev);
3776 if (unlikely(err < 0))
3777 return -1;
3778
Jack Divere19249e2022-11-07 12:13:47 +00003779 dev_dbg(kbdev->dev, "Lowest frequency identified is %llu kHz", kbdev->lowest_gpu_freq_khz);
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003780 dev_dbg(kbdev->dev,
3781		"Setting default highest frequency to %u kHz (pending devfreq initialization)",
3782 kbdev->gpu_props.props.core_props.gpu_freq_khz_max);
3783
Jack Divere19249e2022-11-07 12:13:47 +00003784 return 0;
3785}
3786
Sidath Senanayake823a7602016-06-29 16:03:55 +02003787/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003788 * pm_poweroff_store - Store callback for the pm_poweroff sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003789 * @dev: The device this sysfs file is for
3790 * @attr: The attributes of the sysfs file
3791 * @buf: The value written to the sysfs file
3792 * @count: The number of bytes written to the sysfs file
3793 *
3794 * This function is called when the pm_poweroff sysfs file is written to.
3795 *
3796 * This file contains three values separated by whitespace. The values
3797 * are gpu_poweroff_time (the period of the poweroff timer, in ns),
3798 * poweroff_shader_ticks (the number of poweroff timer ticks before an idle
3799 * shader is powered off), and poweroff_gpu_ticks (the number of poweroff timer
3800 * ticks before the GPU is powered off), in that order.
3801 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003802 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003803 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003804static ssize_t pm_poweroff_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003805 struct device_attribute *attr, const char *buf, size_t count)
3806{
3807 struct kbase_device *kbdev;
Sidath Senanayakea9704312018-12-06 09:09:59 +01003808 struct kbasep_pm_tick_timer_state *stt;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003809 int items;
Sidath Senanayakea9704312018-12-06 09:09:59 +01003810 u64 gpu_poweroff_time;
3811 unsigned int poweroff_shader_ticks, poweroff_gpu_ticks;
3812 unsigned long flags;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003813
3814 kbdev = to_kbase_device(dev);
3815 if (!kbdev)
3816 return -ENODEV;
3817
3818 items = sscanf(buf, "%llu %u %u", &gpu_poweroff_time,
3819 &poweroff_shader_ticks,
3820 &poweroff_gpu_ticks);
3821 if (items != 3) {
3822 dev_err(kbdev->dev, "Couldn't process pm_poweroff write operation.\n"
3823 "Use format <gpu_poweroff_time_ns> <poweroff_shader_ticks> <poweroff_gpu_ticks>\n");
3824 return -EINVAL;
3825 }
3826
Sidath Senanayakea9704312018-12-06 09:09:59 +01003827 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3828
3829 stt = &kbdev->pm.backend.shader_tick_timer;
3830 stt->configured_interval = HR_TIMER_DELAY_NSEC(gpu_poweroff_time);
Sidath Senanayake54804872021-04-06 14:40:20 +01003831 stt->default_ticks = poweroff_shader_ticks;
3832 stt->configured_ticks = stt->default_ticks;
Sidath Senanayakea9704312018-12-06 09:09:59 +01003833
3834 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
3835
3836 if (poweroff_gpu_ticks != 0)
3837 dev_warn(kbdev->dev, "Separate GPU poweroff delay no longer supported.\n");
Sidath Senanayake823a7602016-06-29 16:03:55 +02003838
3839 return count;
3840}
3841
3842/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003843 * pm_poweroff_show - Show callback for the pm_poweroff sysfs entry.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003844 * @dev: The device this sysfs file is for.
3845 * @attr: The attributes of the sysfs file.
3846 * @buf: The output buffer to receive the GPU information.
3847 *
3848 * This function is called to get the current poweroff timer period (in ns) and
3849 * the number of poweroff timer ticks before an idle shader is powered off.
3850 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003851 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003852 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003853static ssize_t pm_poweroff_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003854 struct device_attribute *attr, char * const buf)
3855{
3856 struct kbase_device *kbdev;
Sidath Senanayakea9704312018-12-06 09:09:59 +01003857 struct kbasep_pm_tick_timer_state *stt;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003858 ssize_t ret;
Sidath Senanayakea9704312018-12-06 09:09:59 +01003859 unsigned long flags;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003860
3861 kbdev = to_kbase_device(dev);
3862 if (!kbdev)
3863 return -ENODEV;
3864
Sidath Senanayakea9704312018-12-06 09:09:59 +01003865 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
3866
3867 stt = &kbdev->pm.backend.shader_tick_timer;
3868 ret = scnprintf(buf, PAGE_SIZE, "%llu %u 0\n",
3869 ktime_to_ns(stt->configured_interval),
Sidath Senanayake54804872021-04-06 14:40:20 +01003870 stt->default_ticks);
Sidath Senanayakea9704312018-12-06 09:09:59 +01003871
3872 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003873
3874 return ret;
3875}
3876
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003877static DEVICE_ATTR_RW(pm_poweroff);
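
/*
 * Illustrative pm_poweroff usage from userspace. The sysfs path is assumed
 * here (kbase devices are commonly reachable via /sys/class/misc/mali0/device);
 * the value format follows pm_poweroff_store() above:
 *
 *   # 400000 ns poweroff timer period, 2 shader ticks, GPU ticks unused
 *   echo "400000 2 0" > /sys/class/misc/mali0/device/pm_poweroff
 *   cat /sys/class/misc/mali0/device/pm_poweroff
 */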
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00003878
3879/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003880 * reset_timeout_store - Store callback for the reset_timeout sysfs file.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003881 * @dev: The device this sysfs file is for
3882 * @attr: The attributes of the sysfs file
3883 * @buf: The value written to the sysfs file
3884 * @count: The number of bytes written to the sysfs file
3885 *
3886 * This function is called when the reset_timeout sysfs file is written to. It
3887 * checks the data written, and if valid updates the reset timeout.
3888 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003889 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003890 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003891static ssize_t reset_timeout_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003892 struct device_attribute *attr, const char *buf, size_t count)
3893{
3894 struct kbase_device *kbdev;
3895 int ret;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003896 u32 reset_timeout;
3897 u32 default_reset_timeout;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003898
3899 kbdev = to_kbase_device(dev);
3900 if (!kbdev)
3901 return -ENODEV;
3902
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003903 ret = kstrtou32(buf, 0, &reset_timeout);
3904 if (ret || reset_timeout == 0) {
Sidath Senanayake823a7602016-06-29 16:03:55 +02003905 dev_err(kbdev->dev, "Couldn't process reset_timeout write operation.\n"
3906 "Use format <reset_timeout_ms>\n");
3907 return -EINVAL;
3908 }
3909
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003910#if MALI_USE_CSF
3911 default_reset_timeout = kbase_get_timeout_ms(kbdev, CSF_GPU_RESET_TIMEOUT);
3912#else /* MALI_USE_CSF */
3913 default_reset_timeout = JM_DEFAULT_RESET_TIMEOUT_MS;
3914#endif /* !MALI_USE_CSF */
3915
3916 if (reset_timeout < default_reset_timeout)
3917 dev_warn(kbdev->dev, "requested reset_timeout(%u) is smaller than default(%u)",
3918 reset_timeout, default_reset_timeout);
3919
Sidath Senanayake823a7602016-06-29 16:03:55 +02003920 kbdev->reset_timeout_ms = reset_timeout;
Jörg Wagnerdacf0042023-08-01 13:38:22 +00003921 dev_dbg(kbdev->dev, "Reset timeout: %ums\n", reset_timeout);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003922
3923 return count;
3924}
3925
3926/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003927 * reset_timeout_show - Show callback for the reset_timeout sysfs entry.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003928 * @dev: The device this sysfs file is for.
3929 * @attr: The attributes of the sysfs file.
3930 * @buf: The output buffer to receive the GPU information.
3931 *
3932 * This function is called to get the current reset timeout.
3933 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01003934 * Return: The number of bytes output to @buf.
Sidath Senanayake823a7602016-06-29 16:03:55 +02003935 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003936static ssize_t reset_timeout_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003937 struct device_attribute *attr, char * const buf)
3938{
3939 struct kbase_device *kbdev;
3940 ssize_t ret;
3941
3942 kbdev = to_kbase_device(dev);
3943 if (!kbdev)
3944 return -ENODEV;
3945
3946 ret = scnprintf(buf, PAGE_SIZE, "%d\n", kbdev->reset_timeout_ms);
3947
3948 return ret;
3949}
3950
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003951static DEVICE_ATTR_RW(reset_timeout);
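
/*
 * Illustrative reset_timeout usage (sysfs path assumed, as for pm_poweroff):
 *
 *   echo 800 > /sys/class/misc/mali0/device/reset_timeout    # 800 ms
 *   cat /sys/class/misc/mali0/device/reset_timeout
 *
 * A value below the platform default is accepted, but reset_timeout_store()
 * logs a warning for it.
 */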
Sidath Senanayake823a7602016-06-29 16:03:55 +02003952
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003953static ssize_t mem_pool_size_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003954 struct device_attribute *attr, char * const buf)
3955{
Sidath Senanayakee972f652019-04-10 14:37:00 +02003956 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003957
Sidath Senanayake823a7602016-06-29 16:03:55 +02003958 if (!kbdev)
3959 return -ENODEV;
3960
Sidath Senanayakee972f652019-04-10 14:37:00 +02003961 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3962 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3963 kbase_mem_pool_debugfs_size);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003964}
3965
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003966static ssize_t mem_pool_size_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003967 struct device_attribute *attr, const char *buf, size_t count)
3968{
Sidath Senanayakee972f652019-04-10 14:37:00 +02003969 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003970 int err;
3971
Sidath Senanayake823a7602016-06-29 16:03:55 +02003972 if (!kbdev)
3973 return -ENODEV;
3974
Sidath Senanayakee972f652019-04-10 14:37:00 +02003975 err = kbase_debugfs_helper_set_attr_from_string(buf,
3976 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3977 kbase_mem_pool_debugfs_trim);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003978
Sidath Senanayakee972f652019-04-10 14:37:00 +02003979 return err ? err : count;
Sidath Senanayake823a7602016-06-29 16:03:55 +02003980}
3981
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003982static DEVICE_ATTR_RW(mem_pool_size);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003983
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003984static ssize_t mem_pool_max_size_show(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003985 struct device_attribute *attr, char * const buf)
3986{
Sidath Senanayakee972f652019-04-10 14:37:00 +02003987 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003988
Sidath Senanayake823a7602016-06-29 16:03:55 +02003989 if (!kbdev)
3990 return -ENODEV;
3991
Sidath Senanayakee972f652019-04-10 14:37:00 +02003992 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
3993 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
3994 kbase_mem_pool_debugfs_max_size);
Sidath Senanayake823a7602016-06-29 16:03:55 +02003995}
3996
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08003997static ssize_t mem_pool_max_size_store(struct device *dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02003998 struct device_attribute *attr, const char *buf, size_t count)
3999{
Sidath Senanayakee972f652019-04-10 14:37:00 +02004000 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02004001 int err;
4002
Sidath Senanayake823a7602016-06-29 16:03:55 +02004003 if (!kbdev)
4004 return -ENODEV;
4005
Sidath Senanayakee972f652019-04-10 14:37:00 +02004006 err = kbase_debugfs_helper_set_attr_from_string(buf,
4007 kbdev->mem_pools.small, MEMORY_GROUP_MANAGER_NR_GROUPS,
4008 kbase_mem_pool_debugfs_set_max_size);
Sidath Senanayake823a7602016-06-29 16:03:55 +02004009
Sidath Senanayakee972f652019-04-10 14:37:00 +02004010 return err ? err : count;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004011}
4012
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004013static DEVICE_ATTR_RW(mem_pool_max_size);
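
/*
 * Illustrative usage of the mem_pool_size and mem_pool_max_size attributes
 * (sysfs path assumed). Both are backed by the kbase_debugfs_helper string
 * parsers, which handle one value per memory group; the lp_mem_pool_* variants
 * below behave the same way for the large-page pools:
 *
 *   cat /sys/class/misc/mali0/device/mem_pool_max_size
 *   echo "2048" > /sys/class/misc/mali0/device/mem_pool_max_size
 */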
Sidath Senanayake823a7602016-06-29 16:03:55 +02004014
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004015/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004016 * lp_mem_pool_size_show - Show size of the large memory pages pool.
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004017 * @dev: The device this sysfs file is for.
4018 * @attr: The attributes of the sysfs file.
4019 * @buf: The output buffer to receive the pool size.
4020 *
4021 * This function is called to get the number of large memory pages which
     * currently populate the kbdev pool.
4022 *
4023 * Return: The number of bytes output to @buf.
4024 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004025static ssize_t lp_mem_pool_size_show(struct device *dev,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004026 struct device_attribute *attr, char * const buf)
4027{
Sidath Senanayakee972f652019-04-10 14:37:00 +02004028 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004029
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004030 if (!kbdev)
4031 return -ENODEV;
4032
Sidath Senanayakee972f652019-04-10 14:37:00 +02004033 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
4034 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
4035 kbase_mem_pool_debugfs_size);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004036}
4037
4038/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004039 * lp_mem_pool_size_store - Set size of the large memory pages pool.
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004040 * @dev: The device this sysfs file is for.
4041 * @attr: The attributes of the sysfs file.
4042 * @buf: The value written to the sysfs file.
4043 * @count: The number of bytes written to the sysfs file.
4044 *
4045 * This function is called to set the number of large memory pages which should
4046 * populate the kbdev pool. This may cause existing pages to be removed from the
     * pool, or new pages to be created and then added to the pool.
4047 *
4048 * Return: @count if the function succeeded. An error code on failure.
4049 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004050static ssize_t lp_mem_pool_size_store(struct device *dev,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004051 struct device_attribute *attr, const char *buf, size_t count)
4052{
Sidath Senanayakee972f652019-04-10 14:37:00 +02004053 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004054 int err;
4055
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004056 if (!kbdev)
4057 return -ENODEV;
4058
Sidath Senanayakee972f652019-04-10 14:37:00 +02004059 err = kbase_debugfs_helper_set_attr_from_string(buf,
4060 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
4061 kbase_mem_pool_debugfs_trim);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004062
Sidath Senanayakee972f652019-04-10 14:37:00 +02004063 return err ? err : count;
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004064}
4065
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004066static DEVICE_ATTR_RW(lp_mem_pool_size);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004067
4068/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004069 * lp_mem_pool_max_size_show - Show maximum size of the large memory pages pool.
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004070 * @dev: The device this sysfs file is for.
4071 * @attr: The attributes of the sysfs file.
4072 * @buf: The output buffer to receive the pool size.
4073 *
4074 * This function is called to get the maximum number of large memory pages that
     * the kbdev pool can possibly contain.
4075 *
4076 * Return: The number of bytes output to @buf.
4077 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004078static ssize_t lp_mem_pool_max_size_show(struct device *dev,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004079 struct device_attribute *attr, char * const buf)
4080{
Sidath Senanayakee972f652019-04-10 14:37:00 +02004081 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004082
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004083 if (!kbdev)
4084 return -ENODEV;
4085
Sidath Senanayakee972f652019-04-10 14:37:00 +02004086 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
4087 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
4088 kbase_mem_pool_debugfs_max_size);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004089}
4090
4091/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004092 * lp_mem_pool_max_size_store - Set maximum size of the large memory pages pool.
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004093 * @dev: The device this sysfs file is for.
4094 * @attr: The attributes of the sysfs file.
4095 * @buf: The value written to the sysfs file.
4096 * @count: The number of bytes written to the sysfs file.
4097 *
4098 * This function is called to set the maximum number of large memory pages that
     * the kbdev pool can possibly contain.
4099 *
4100 * Return: @count if the function succeeded. An error code on failure.
4101 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004102static ssize_t lp_mem_pool_max_size_store(struct device *dev,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004103 struct device_attribute *attr, const char *buf, size_t count)
4104{
Sidath Senanayakee972f652019-04-10 14:37:00 +02004105 struct kbase_device *const kbdev = to_kbase_device(dev);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004106 int err;
4107
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004108 if (!kbdev)
4109 return -ENODEV;
4110
Sidath Senanayakee972f652019-04-10 14:37:00 +02004111 err = kbase_debugfs_helper_set_attr_from_string(buf,
4112 kbdev->mem_pools.large, MEMORY_GROUP_MANAGER_NR_GROUPS,
4113 kbase_mem_pool_debugfs_set_max_size);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004114
Sidath Senanayakee972f652019-04-10 14:37:00 +02004115 return err ? err : count;
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004116}
4117
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004118static DEVICE_ATTR_RW(lp_mem_pool_max_size);
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004119
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00004120/**
4121 * show_simplified_mem_pool_max_size - Show the maximum size for the memory
4122 * pool 0 of small (4KiB) pages.
4123 * @dev: The device this sysfs file is for.
4124 * @attr: The attributes of the sysfs file.
4125 * @buf: The output buffer to receive the max size.
4126 *
4127 * This function is called to get the maximum size for the memory pool 0 of
4128 * small (4KiB) pages. It is assumed that the maximum size value is the same for
4129 * all the pools.
4130 *
4131 * Return: The number of bytes output to @buf.
4132 */
4133static ssize_t show_simplified_mem_pool_max_size(struct device *dev,
4134 struct device_attribute *attr, char * const buf)
4135{
4136 struct kbase_device *const kbdev = to_kbase_device(dev);
4137
4138 if (!kbdev)
4139 return -ENODEV;
4140
4141 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
4142 kbdev->mem_pools.small, 1, kbase_mem_pool_debugfs_max_size);
4143}
4144
4145/**
4146 * set_simplified_mem_pool_max_size - Set the same maximum size for all the
4147 * memory pools of small (4KiB) pages.
4148 * @dev: The device this sysfs file is for
4149 * @attr: The attributes of the sysfs file
4150 * @buf: The value written to the sysfs file
4151 * @count: The number of bytes written to the sysfs file
4152 *
4153 * This function is called to set the same maximum size for all the memory
4154 * pools of small (4KiB) pages.
4155 *
4156 * Return: @count if the function succeeded. An error code on failure.
4157 */
4158static ssize_t set_simplified_mem_pool_max_size(struct device *dev,
4159 struct device_attribute *attr, const char *buf, size_t count)
4160{
4161 struct kbase_device *const kbdev = to_kbase_device(dev);
4162 unsigned long new_size;
4163 int gid;
4164 int err;
4165
4166 if (!kbdev)
4167 return -ENODEV;
4168
4169 err = kstrtoul(buf, 0, &new_size);
4170 if (err)
4171 return -EINVAL;
4172
4173 for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
4174 kbase_mem_pool_debugfs_set_max_size(
4175 kbdev->mem_pools.small, gid, (size_t)new_size);
4176
4177 return count;
4178}
4179
4180static DEVICE_ATTR(max_size, 0600, show_simplified_mem_pool_max_size,
4181 set_simplified_mem_pool_max_size);
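
/*
 * Unlike mem_pool_max_size above, the simplified max_size attribute takes a
 * single value and applies it to every small-page pool. The lp_max_size
 * attribute below does the same for the large-page pools. Example (the
 * directory depends on where this attribute group is registered):
 *
 *   echo 4096 > <sysfs dir>/max_size
 */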
4182
4183/**
4184 * show_simplified_lp_mem_pool_max_size - Show the maximum size for the memory
4185 * pool 0 of large (2MiB) pages.
4186 * @dev: The device this sysfs file is for.
4187 * @attr: The attributes of the sysfs file.
4188 * @buf: The output buffer to receive the total current pool size.
4189 *
4190 * This function is called to get the maximum size for the memory pool 0 of
4191 * large (2MiB) pages. It is assumed that the maximum size value is the same for
4192 * all the pools.
4193 *
4194 * Return: The number of bytes output to @buf.
4195 */
4196static ssize_t show_simplified_lp_mem_pool_max_size(struct device *dev,
4197 struct device_attribute *attr, char * const buf)
4198{
4199 struct kbase_device *const kbdev = to_kbase_device(dev);
4200
4201 if (!kbdev)
4202 return -ENODEV;
4203
4204 return kbase_debugfs_helper_get_attr_to_string(buf, PAGE_SIZE,
4205 kbdev->mem_pools.large, 1, kbase_mem_pool_debugfs_max_size);
4206}
4207
4208/**
4209 * set_simplified_lp_mem_pool_max_size - Set the same maximum size for all the
4210 * memory pools of large (2MiB) pages.
4211 * @dev: The device this sysfs file is for
4212 * @attr: The attributes of the sysfs file
4213 * @buf: The value written to the sysfs file
4214 * @count: The number of bytes written to the sysfs file
4215 *
4216 * This function is called to set the same maximum size for all the memory
4217 * pools of large (2MiB) pages.
4218 *
4219 * Return: @count if the function succeeded. An error code on failure.
4220 */
4221static ssize_t set_simplified_lp_mem_pool_max_size(struct device *dev,
4222 struct device_attribute *attr, const char *buf, size_t count)
4223{
4224 struct kbase_device *const kbdev = to_kbase_device(dev);
4225 unsigned long new_size;
4226 int gid;
4227 int err;
4228
4229 if (!kbdev)
4230 return -ENODEV;
4231
4232 err = kstrtoul(buf, 0, &new_size);
4233 if (err)
4234 return -EINVAL;
4235
4236 for (gid = 0; gid < MEMORY_GROUP_MANAGER_NR_GROUPS; ++gid)
4237 kbase_mem_pool_debugfs_set_max_size(
4238 kbdev->mem_pools.large, gid, (size_t)new_size);
4239
4240 return count;
4241}
4242
4243static DEVICE_ATTR(lp_max_size, 0600, show_simplified_lp_mem_pool_max_size,
4244 set_simplified_lp_mem_pool_max_size);
4245
4246/**
4247 * show_simplified_ctx_default_max_size - Show the default maximum size for the
4248 * memory pool 0 of small (4KiB) pages.
4249 * @dev: The device this sysfs file is for.
4250 * @attr: The attributes of the sysfs file.
4251 * @buf: The output buffer to receive the pool size.
4252 *
4253 * This function is called to get the default ctx maximum size for the memory
4254 * pool 0 of small (4KiB) pages. It is assumed that the maximum size value is
4255 * the same for all the pools. The maximum size for the pool of large (2MiB)
4256 * pages will be the same as the max size of the pool of small (4KiB) pages
     * in terms of bytes.
4257 *
4258 * Return: The number of bytes output to @buf.
4259 */
4260static ssize_t show_simplified_ctx_default_max_size(struct device *dev,
4261 struct device_attribute *attr, char * const buf)
4262{
4263 struct kbase_device *kbdev = to_kbase_device(dev);
4264 size_t max_size;
4265
4266 if (!kbdev)
4267 return -ENODEV;
4268
4269 max_size = kbase_mem_pool_config_debugfs_max_size(
4270 kbdev->mem_pool_defaults.small, 0);
4271
4272 return scnprintf(buf, PAGE_SIZE, "%zu\n", max_size);
4273}
4274
4275/**
4276 * set_simplified_ctx_default_max_size - Set the same default maximum size for
4277 * all the pools created for new
4278 * contexts. This covers the pool of
4279 * large pages as well and its max size
4280 * will be same as max size of the pool
4281 * of small pages in terms of bytes.
4282 * @dev: The device this sysfs file is for.
4283 * @attr: The attributes of the sysfs file.
4284 * @buf: The value written to the sysfs file.
4285 * @count: The number of bytes written to the sysfs file.
4286 *
4287 * This function is called to set the same maximum size for all pools created
4288 * for new contexts.
4289 *
4290 * Return: @count if the function succeeded. An error code on failure.
4291 */
4292static ssize_t set_simplified_ctx_default_max_size(struct device *dev,
4293 struct device_attribute *attr, const char *buf, size_t count)
4294{
4295 struct kbase_device *kbdev;
4296 unsigned long new_size;
4297 int err;
4298
4299 kbdev = to_kbase_device(dev);
4300 if (!kbdev)
4301 return -ENODEV;
4302
4303 err = kstrtoul(buf, 0, &new_size);
4304 if (err)
4305 return -EINVAL;
4306
4307 kbase_mem_pool_group_config_set_max_size(
4308 &kbdev->mem_pool_defaults, (size_t)new_size);
4309
4310 return count;
4311}
4312
4313static DEVICE_ATTR(ctx_default_max_size, 0600,
4314 show_simplified_ctx_default_max_size,
4315 set_simplified_ctx_default_max_size);
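
/*
 * ctx_default_max_size also takes a single value, which is applied through
 * kbase_mem_pool_group_config_set_max_size() to the pool defaults used by
 * newly created contexts. Example (attribute location depends on where the
 * group is registered):
 *
 *   echo 1024 > <sysfs dir>/ctx_default_max_size
 */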
4316
Sidath Senanayake72f24572020-10-27 11:38:49 +00004317#if !MALI_USE_CSF
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004318/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004319 * js_ctx_scheduling_mode_show - Show callback for js_ctx_scheduling_mode sysfs
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004320 * entry.
4321 * @dev: The device this sysfs file is for.
4322 * @attr: The attributes of the sysfs file.
4323 * @buf: The output buffer to receive the context scheduling mode information.
4324 *
4325 * This function is called to get the context scheduling mode being used by JS.
4326 *
4327 * Return: The number of bytes output to @buf.
4328 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004329static ssize_t js_ctx_scheduling_mode_show(struct device *dev,
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004330 struct device_attribute *attr, char * const buf)
4331{
4332 struct kbase_device *kbdev;
4333
4334 kbdev = to_kbase_device(dev);
4335 if (!kbdev)
4336 return -ENODEV;
4337
4338 return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
4339}
4340
4341/**
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004342 * js_ctx_scheduling_mode_store - Set callback for js_ctx_scheduling_mode sysfs
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004343 * entry.
4344 * @dev: The device this sysfs file is for.
4345 * @attr: The attributes of the sysfs file.
4346 * @buf: The value written to the sysfs file.
4347 * @count: The number of bytes written to the sysfs file.
4348 *
4349 * This function is called when the js_ctx_scheduling_mode sysfs file is written
4350 * to. It checks the data written, and if valid updates the ctx scheduling mode
4351 * being used by JS.
4352 *
4353 * Return: @count if the function succeeded. An error code on failure.
4354 */
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004355static ssize_t js_ctx_scheduling_mode_store(struct device *dev,
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004356 struct device_attribute *attr, const char *buf, size_t count)
4357{
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02004358 struct kbase_context *kctx;
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004359 u32 new_js_ctx_scheduling_mode;
4360 struct kbase_device *kbdev;
4361 unsigned long flags;
4362 int ret;
4363
4364 kbdev = to_kbase_device(dev);
4365 if (!kbdev)
4366 return -ENODEV;
4367
4368 ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
4369 if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
4370 dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
4371 " write operation.\n"
4372 "Use format <js_ctx_scheduling_mode>\n");
4373 return -EINVAL;
4374 }
4375
4376 if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
4377 return count;
4378
4379 mutex_lock(&kbdev->kctx_list_lock);
4380 spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
4381
4382 /* Update the context priority mode */
4383 kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
4384
4385 /* Adjust priority of all the contexts as per the new mode */
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02004386 list_for_each_entry(kctx, &kbdev->kctx_list, kctx_list_link)
4387 kbase_js_update_ctx_priority(kctx);
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01004388
4389 spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
4390 mutex_unlock(&kbdev->kctx_list_lock);
4391
4392 dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
4393
4394 return count;
4395}
4396
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004397static DEVICE_ATTR_RW(js_ctx_scheduling_mode);
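
/*
 * Illustrative js_ctx_scheduling_mode usage (sysfs path assumed as for the
 * other device attributes). Any value below KBASE_JS_PRIORITY_MODE_COUNT is
 * accepted and applied to all existing contexts:
 *
 *   cat /sys/class/misc/mali0/device/js_ctx_scheduling_mode
 *   echo 1 > /sys/class/misc/mali0/device/js_ctx_scheduling_mode
 */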
Sidath Senanayake228451e2019-06-27 14:37:54 +02004398
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004399/* Number of entries in serialize_jobs_settings[] */
4400#define NR_SERIALIZE_JOBS_SETTINGS 5
4401/* Maximum string length in serialize_jobs_settings[].name */
4402#define MAX_SERIALIZE_JOBS_NAME_LEN 16
4403
4404static struct
4405{
4406 char *name;
4407 u8 setting;
4408} serialize_jobs_settings[NR_SERIALIZE_JOBS_SETTINGS] = {
4409 {"none", 0},
4410 {"intra-slot", KBASE_SERIALIZE_INTRA_SLOT},
4411 {"inter-slot", KBASE_SERIALIZE_INTER_SLOT},
4412 {"full", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT},
4413 {"full-reset", KBASE_SERIALIZE_INTRA_SLOT | KBASE_SERIALIZE_INTER_SLOT |
4414 KBASE_SERIALIZE_RESET}
4415};
4416
4417/**
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004418 * update_serialize_jobs_setting - Update the serialization setting for the
4419 * submission of GPU jobs.
4420 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004421 * @kbdev: An instance of the GPU platform device, allocated from the probe
4422 * method of the driver.
4423 * @buf: Buffer containing the value written to the sysfs/debugfs file.
4424 * @count: The number of bytes to write to the sysfs/debugfs file.
4425 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004426 * This function is called when the serialize_jobs sysfs/debugfs file is
4427 * written to. It matches the requested setting against the available settings
4428 * and if a matching setting is found updates kbdev->serialize_jobs.
4429 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004430 * Return: @count if the function succeeded. An error code on failure.
4431 */
4432static ssize_t update_serialize_jobs_setting(struct kbase_device *kbdev,
4433 const char *buf, size_t count)
4434{
4435 int i;
4436 bool valid = false;
4437
4438 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4439 if (sysfs_streq(serialize_jobs_settings[i].name, buf)) {
4440 kbdev->serialize_jobs =
4441 serialize_jobs_settings[i].setting;
4442 valid = true;
4443 break;
4444 }
4445 }
4446
4447 if (!valid) {
4448 dev_err(kbdev->dev, "serialize_jobs: invalid setting");
4449 return -EINVAL;
4450 }
4451
4452 return count;
4453}
4454
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01004455#if IS_ENABLED(CONFIG_DEBUG_FS)
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004456/**
4457 * kbasep_serialize_jobs_seq_debugfs_show - Show callback for the serialize_jobs
4458 * debugfs file
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004459 * @sfile: seq_file pointer
4460 * @data: Private callback data
4461 *
4462 * This function is called to get the contents of the serialize_jobs debugfs
4463 * file. This is a list of the available settings with the currently active one
4464 * surrounded by square brackets.
4465 *
4466 * Return: 0 on success, or an error code on error
4467 */
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004468static int kbasep_serialize_jobs_seq_debugfs_show(struct seq_file *sfile,
4469 void *data)
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004470{
4471 struct kbase_device *kbdev = sfile->private;
4472 int i;
4473
4474 CSTD_UNUSED(data);
4475
4476 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4477 if (kbdev->serialize_jobs == serialize_jobs_settings[i].setting)
4478 seq_printf(sfile, "[%s] ",
4479 serialize_jobs_settings[i].name);
4480 else
4481 seq_printf(sfile, "%s ",
4482 serialize_jobs_settings[i].name);
4483 }
4484
4485 seq_puts(sfile, "\n");
4486
4487 return 0;
4488}
4489
4490/**
Sidath Senanayake44e8be92017-01-24 10:48:35 +01004491 * kbasep_serialize_jobs_debugfs_write - Store callback for the serialize_jobs
4492 * debugfs file.
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004493 * @file: File pointer
4494 * @ubuf: User buffer containing data to store
4495 * @count: Number of bytes in user buffer
4496 * @ppos: File position
4497 *
4498 * This function is called when the serialize_jobs debugfs file is written to.
4499 * It matches the requested setting against the available settings and if a
4500 * matching setting is found updates kbdev->serialize_jobs.
4501 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01004502 * Return: @count if the function succeeded. An error code on failure.
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004503 */
4504static ssize_t kbasep_serialize_jobs_debugfs_write(struct file *file,
4505 const char __user *ubuf, size_t count, loff_t *ppos)
4506{
4507 struct seq_file *s = file->private_data;
4508 struct kbase_device *kbdev = s->private;
4509 char buf[MAX_SERIALIZE_JOBS_NAME_LEN];
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004510
4511 CSTD_UNUSED(ppos);
4512
4513 count = min_t(size_t, sizeof(buf) - 1, count);
4514 if (copy_from_user(buf, ubuf, count))
4515 return -EFAULT;
4516
4517 buf[count] = 0;
4518
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004519 return update_serialize_jobs_setting(kbdev, buf, count);
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004520}
4521
4522/**
Sidath Senanayake44e8be92017-01-24 10:48:35 +01004523 * kbasep_serialize_jobs_debugfs_open - Open callback for the serialize_jobs
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004524 * debugfs file
4525 * @in: inode pointer
4526 * @file: file pointer
4527 *
4528 * Return: Zero on success, error code on failure
4529 */
4530static int kbasep_serialize_jobs_debugfs_open(struct inode *in,
4531 struct file *file)
4532{
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004533 return single_open(file, kbasep_serialize_jobs_seq_debugfs_show,
4534 in->i_private);
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004535}
4536
4537static const struct file_operations kbasep_serialize_jobs_debugfs_fops = {
Sidath Senanayake228451e2019-06-27 14:37:54 +02004538 .owner = THIS_MODULE,
Sidath Senanayake5f133f22016-11-22 10:00:32 +01004539 .open = kbasep_serialize_jobs_debugfs_open,
4540 .read = seq_read,
4541 .write = kbasep_serialize_jobs_debugfs_write,
4542 .llseek = seq_lseek,
4543 .release = single_release,
4544};
4545
4546#endif /* CONFIG_DEBUG_FS */
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004547
4548/**
4549 * show_serialize_jobs_sysfs - Show callback for serialize_jobs sysfs file.
4550 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004551 * @dev: The device this sysfs file is for
4552 * @attr: The attributes of the sysfs file
4553 * @buf: The output buffer for the sysfs file contents
4554 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004555 * This function is called to get the contents of the serialize_jobs sysfs
4556 * file. This is a list of the available settings with the currently active
4557 * one surrounded by square brackets.
4558 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004559 * Return: The number of bytes output to @buf.
4560 */
4561static ssize_t show_serialize_jobs_sysfs(struct device *dev,
4562 struct device_attribute *attr,
4563 char *buf)
4564{
4565 struct kbase_device *kbdev = to_kbase_device(dev);
4566 ssize_t ret = 0;
4567 int i;
4568
4569 for (i = 0; i < NR_SERIALIZE_JOBS_SETTINGS; i++) {
4570 if (kbdev->serialize_jobs ==
4571 serialize_jobs_settings[i].setting)
4572			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ",
4573 serialize_jobs_settings[i].name);
4574 else
4575 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ",
4576 serialize_jobs_settings[i].name);
4577 }
4578
4579 if (ret < PAGE_SIZE - 1) {
4580 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
4581 } else {
4582 buf[PAGE_SIZE - 2] = '\n';
4583 buf[PAGE_SIZE - 1] = '\0';
4584 ret = PAGE_SIZE - 1;
4585 }
4586
4587 return ret;
4588}
4589
4590/**
4591 * store_serialize_jobs_sysfs - Store callback for serialize_jobs sysfs file.
4592 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004593 * @dev: The device this sysfs file is for
4594 * @attr: The attributes of the sysfs file
4595 * @buf: The value written to the sysfs file
4596 * @count: The number of bytes to write to the sysfs file
4597 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004598 * This function is called when the serialize_jobs sysfs file is written to.
4599 * It matches the requested setting against the available settings and if a
4600 * matching setting is found updates kbdev->serialize_jobs.
4601 *
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01004602 * Return: @count if the function succeeded. An error code on failure.
4603 */
4604static ssize_t store_serialize_jobs_sysfs(struct device *dev,
4605 struct device_attribute *attr,
4606 const char *buf, size_t count)
4607{
4608 return update_serialize_jobs_setting(to_kbase_device(dev), buf, count);
4609}
4610
4611static DEVICE_ATTR(serialize_jobs, 0600, show_serialize_jobs_sysfs,
4612 store_serialize_jobs_sysfs);
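
/*
 * Illustrative serialize_jobs usage (sysfs path assumed). Valid values are the
 * names listed in serialize_jobs_settings[]:
 *
 *   cat /sys/class/misc/mali0/device/serialize_jobs
 *   echo full-reset > /sys/class/misc/mali0/device/serialize_jobs
 */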
Sidath Senanayake72f24572020-10-27 11:38:49 +00004613#endif /* !MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02004614
Sidath Senanayakea9704312018-12-06 09:09:59 +01004615static void kbasep_protected_mode_hwcnt_disable_worker(struct work_struct *data)
4616{
4617 struct kbase_device *kbdev = container_of(data, struct kbase_device,
4618 protected_mode_hwcnt_disable_work);
Sidath Senanayakefca86132021-06-15 13:39:30 +01004619 spinlock_t *backend_lock;
Sidath Senanayakea9704312018-12-06 09:09:59 +01004620 unsigned long flags;
4621
4622 bool do_disable;
4623
Sidath Senanayakefca86132021-06-15 13:39:30 +01004624#if MALI_USE_CSF
4625 backend_lock = &kbdev->csf.scheduler.interrupt_lock;
4626#else
4627 backend_lock = &kbdev->hwaccess_lock;
4628#endif
4629
4630 spin_lock_irqsave(backend_lock, flags);
Sidath Senanayakea9704312018-12-06 09:09:59 +01004631 do_disable = !kbdev->protected_mode_hwcnt_desired &&
4632 !kbdev->protected_mode_hwcnt_disabled;
Sidath Senanayakefca86132021-06-15 13:39:30 +01004633 spin_unlock_irqrestore(backend_lock, flags);
Sidath Senanayakea9704312018-12-06 09:09:59 +01004634
4635 if (!do_disable)
4636 return;
4637
4638 kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
4639
Sidath Senanayakefca86132021-06-15 13:39:30 +01004640 spin_lock_irqsave(backend_lock, flags);
Sidath Senanayakea9704312018-12-06 09:09:59 +01004641 do_disable = !kbdev->protected_mode_hwcnt_desired &&
4642 !kbdev->protected_mode_hwcnt_disabled;
4643
4644 if (do_disable) {
4645 /* Protected mode state did not change while we were doing the
4646 * disable, so commit the work we just performed and continue
4647 * the state machine.
4648 */
4649 kbdev->protected_mode_hwcnt_disabled = true;
Sidath Senanayake72f24572020-10-27 11:38:49 +00004650#if !MALI_USE_CSF
Sidath Senanayakea9704312018-12-06 09:09:59 +01004651 kbase_backend_slot_update(kbdev);
Sidath Senanayake72f24572020-10-27 11:38:49 +00004652#endif /* !MALI_USE_CSF */
Sidath Senanayakea9704312018-12-06 09:09:59 +01004653 } else {
4654 /* Protected mode state was updated while we were doing the
4655 * disable, so we need to undo the disable we just performed.
4656 */
4657 kbase_hwcnt_context_enable(kbdev->hwcnt_gpu_ctx);
4658 }
4659
Sidath Senanayakefca86132021-06-15 13:39:30 +01004660 spin_unlock_irqrestore(backend_lock, flags);
Sidath Senanayakea9704312018-12-06 09:09:59 +01004661}
4662
Petri Gyntherc9155822020-12-10 14:55:40 -08004663#ifndef PLATFORM_PROTECTED_CALLBACKS
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004664static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02004665{
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004666 struct kbase_device *kbdev = pdev->data;
Sidath Senanayake192bd792016-11-09 14:14:45 +01004667
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004668 return kbase_pm_protected_mode_enable(kbdev);
4669}
Sidath Senanayake192bd792016-11-09 14:14:45 +01004670
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004671static int kbasep_protected_mode_disable(struct protected_mode_device *pdev)
4672{
4673 struct kbase_device *kbdev = pdev->data;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02004674
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004675 return kbase_pm_protected_mode_disable(kbdev);
4676}
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02004677
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004678static const struct protected_mode_ops kbasep_native_protected_ops = {
4679 .protected_mode_enable = kbasep_protected_mode_enable,
4680 .protected_mode_disable = kbasep_protected_mode_disable
4681};
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02004682
Sidath Senanayake6e032ea2020-08-28 18:41:57 +01004683#define PLATFORM_PROTECTED_CALLBACKS (&kbasep_native_protected_ops)
4684#endif /* PLATFORM_PROTECTED_CALLBACKS */
4685
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004686int kbase_protected_mode_init(struct kbase_device *kbdev)
4687{
4688 /* Use native protected ops */
4689 kbdev->protected_dev = kzalloc(sizeof(*kbdev->protected_dev),
4690 GFP_KERNEL);
4691 if (!kbdev->protected_dev)
4692 return -ENOMEM;
4693 kbdev->protected_dev->data = kbdev;
Sidath Senanayake6e032ea2020-08-28 18:41:57 +01004694 kbdev->protected_ops = PLATFORM_PROTECTED_CALLBACKS;
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004695 INIT_WORK(&kbdev->protected_mode_hwcnt_disable_work,
4696 kbasep_protected_mode_hwcnt_disable_worker);
4697 kbdev->protected_mode_hwcnt_desired = true;
4698 kbdev->protected_mode_hwcnt_disabled = false;
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02004699 return 0;
4700}
4701
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004702void kbase_protected_mode_term(struct kbase_device *kbdev)
Sidath Senanayake6f5ab3b2017-05-18 14:43:17 +02004703{
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004704 cancel_work_sync(&kbdev->protected_mode_hwcnt_disable_work);
4705 kfree(kbdev->protected_dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02004706}
4707
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08004708#if IS_ENABLED(CONFIG_MALI_NO_MALI)
4709static int kbase_common_reg_map(struct kbase_device *kbdev)
4710{
4711 return 0;
4712}
4713static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
4714{
4715}
Debarshi Dutta20fff722023-06-02 13:36:22 +00004716#else /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
Sidath Senanayake823a7602016-06-29 16:03:55 +02004717static int kbase_common_reg_map(struct kbase_device *kbdev)
4718{
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004719 int err = 0;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004720
4721 if (!request_mem_region(kbdev->reg_start, kbdev->reg_size, dev_name(kbdev->dev))) {
4722 dev_err(kbdev->dev, "Register window unavailable\n");
4723 err = -EIO;
4724 goto out_region;
4725 }
4726
4727 kbdev->reg = ioremap(kbdev->reg_start, kbdev->reg_size);
4728 if (!kbdev->reg) {
4729 dev_err(kbdev->dev, "Can't remap register window\n");
4730 err = -EINVAL;
4731 goto out_ioremap;
4732 }
4733
Sidath Senanayakec19c6272017-09-19 18:23:58 +02004734 return err;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004735
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004736out_ioremap:
Sidath Senanayake823a7602016-06-29 16:03:55 +02004737 release_mem_region(kbdev->reg_start, kbdev->reg_size);
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004738out_region:
Sidath Senanayake823a7602016-06-29 16:03:55 +02004739 return err;
4740}
4741
4742static void kbase_common_reg_unmap(struct kbase_device * const kbdev)
4743{
4744 if (kbdev->reg) {
4745 iounmap(kbdev->reg);
4746 release_mem_region(kbdev->reg_start, kbdev->reg_size);
4747 kbdev->reg = NULL;
4748 kbdev->reg_start = 0;
4749 kbdev->reg_size = 0;
4750 }
4751}
Debarshi Dutta20fff722023-06-02 13:36:22 +00004752#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
Sidath Senanayake823a7602016-06-29 16:03:55 +02004753
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004754int registers_map(struct kbase_device * const kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02004755{
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004756 /* the first memory resource is the physical address of the GPU
4757 * registers.
4758 */
4759 struct platform_device *pdev = to_platform_device(kbdev->dev);
4760 struct resource *reg_res;
4761 int err;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004762
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004763 reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4764 if (!reg_res) {
4765 dev_err(kbdev->dev, "Invalid register resource\n");
4766 return -ENOENT;
4767 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02004768
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004769 kbdev->reg_start = reg_res->start;
4770 kbdev->reg_size = resource_size(reg_res);
Sidath Senanayake823a7602016-06-29 16:03:55 +02004771
Sidath Senanayake72f24572020-10-27 11:38:49 +00004772#if MALI_USE_CSF
4773 if (kbdev->reg_size <
4774 (CSF_HW_DOORBELL_PAGE_OFFSET +
4775 CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE)) {
4776 dev_err(kbdev->dev, "Insufficient register space, will override to the required size\n");
4777 kbdev->reg_size = CSF_HW_DOORBELL_PAGE_OFFSET +
4778 CSF_NUM_DOORBELL * CSF_HW_DOORBELL_PAGE_SIZE;
4779 }
4780#endif
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02004781
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004782 err = kbase_common_reg_map(kbdev);
4783 if (err) {
4784 dev_err(kbdev->dev, "Failed to map registers\n");
4785 return err;
4786 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02004787
4788 return 0;
4789}
4790
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004791void registers_unmap(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02004792{
4793 kbase_common_reg_unmap(kbdev);
4794}
4795
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004796#if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
4797
4798static bool kbase_is_pm_enabled(const struct device_node *gpu_node)
4799{
4800 const struct device_node *power_model_node;
4801 const void *cooling_cells_node;
4802 const void *operating_point_node;
4803 bool is_pm_enable = false;
4804
Jörg Wagnerdacf0042023-08-01 13:38:22 +00004805 power_model_node = of_get_child_by_name(gpu_node, "power-model");
4806 if (!power_model_node)
4807 power_model_node = of_get_child_by_name(gpu_node, "power_model");
4808
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004809 if (power_model_node)
4810 is_pm_enable = true;
4811
4812 cooling_cells_node = of_get_property(gpu_node,
4813 "#cooling-cells", NULL);
4814 if (cooling_cells_node)
4815 is_pm_enable = true;
4816
4817 operating_point_node = of_get_property(gpu_node,
4818 "operating-points", NULL);
4819 if (operating_point_node)
4820 is_pm_enable = true;
4821
4822 return is_pm_enable;
4823}
4824
4825static bool kbase_is_pv_enabled(const struct device_node *gpu_node)
4826{
4827 const void *arbiter_if_node;
4828
Jörg Wagnerdacf0042023-08-01 13:38:22 +00004829 arbiter_if_node = of_get_property(gpu_node, "arbiter-if", NULL);
4830 if (!arbiter_if_node)
4831 arbiter_if_node = of_get_property(gpu_node, "arbiter_if", NULL);
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004832
4833 return arbiter_if_node ? true : false;
4834}
4835
4836static bool kbase_is_full_coherency_enabled(const struct device_node *gpu_node)
4837{
4838 const void *coherency_dts;
4839 u32 coherency;
4840
4841 coherency_dts = of_get_property(gpu_node,
4842 "system-coherency",
4843 NULL);
4844 if (coherency_dts) {
4845 coherency = be32_to_cpup(coherency_dts);
4846 if (coherency == COHERENCY_ACE)
4847 return true;
4848 }
4849 return false;
4850}
4851
4852#endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4853
4854int kbase_device_pm_init(struct kbase_device *kbdev)
4855{
4856 int err = 0;
4857
4858#if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
4859
4860 u32 gpu_id;
4861 u32 product_id;
4862 u32 gpu_model_id;
4863
4864 if (kbase_is_pv_enabled(kbdev->dev->of_node)) {
Sidath Senanayakefca86132021-06-15 13:39:30 +01004865 dev_info(kbdev->dev, "Arbitration interface enabled\n");
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004866 if (kbase_is_pm_enabled(kbdev->dev->of_node)) {
4867 /* Arbitration AND power management invalid */
4868 dev_err(kbdev->dev, "Invalid combination of arbitration AND power management\n");
4869 return -EPERM;
4870 }
4871 if (kbase_is_full_coherency_enabled(kbdev->dev->of_node)) {
4872 /* Arbitration AND full coherency invalid */
4873 dev_err(kbdev->dev, "Invalid combination of arbitration AND full coherency\n");
4874 return -EPERM;
4875 }
4876 err = kbase_arbiter_pm_early_init(kbdev);
4877 if (err == 0) {
4878 /* Check if Arbitration is running on
4879 * supported GPU platform
4880 */
4881 kbase_pm_register_access_enable(kbdev);
4882 gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
4883 kbase_pm_register_access_disable(kbdev);
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004884 product_id =
4885 KBASE_UBFX32(gpu_id, KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT, 16);
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004886 gpu_model_id = GPU_ID2_MODEL_MATCH_VALUE(product_id);
4887
4888 if (gpu_model_id != GPU_ID2_PRODUCT_TGOX
Sidath Senanayakefca86132021-06-15 13:39:30 +01004889 && gpu_model_id != GPU_ID2_PRODUCT_TNOX
4890 && gpu_model_id != GPU_ID2_PRODUCT_TBAX) {
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004891 kbase_arbiter_pm_early_term(kbdev);
4892 dev_err(kbdev->dev, "GPU platform not suitable for arbitration\n");
4893 return -EPERM;
4894 }
4895 }
4896 } else {
Sidath Senanayake97483052021-01-29 15:03:53 +00004897 kbdev->arb.arb_if = NULL;
4898 kbdev->arb.arb_dev = NULL;
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004899 err = power_control_init(kbdev);
4900 }
4901#else
4902 err = power_control_init(kbdev);
4903#endif /* CONFIG_MALI_ARBITER_SUPPORT && CONFIG_OF */
4904 return err;
4905}
4906
4907void kbase_device_pm_term(struct kbase_device *kbdev)
4908{
4909#ifdef CONFIG_MALI_ARBITER_SUPPORT
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01004910#if IS_ENABLED(CONFIG_OF)
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02004911 if (kbase_is_pv_enabled(kbdev->dev->of_node))
4912 kbase_arbiter_pm_early_term(kbdev);
4913 else
4914 power_control_term(kbdev);
4915#endif /* CONFIG_OF */
4916#else
4917 power_control_term(kbdev);
4918#endif
4919}
4920
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004921int power_control_init(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02004922{
Sidath Senanayake97483052021-01-29 15:03:53 +00004923#ifndef CONFIG_OF
Sidath Senanayake228451e2019-06-27 14:37:54 +02004924 /* Power control initialization requires at least the capability to get
4925 * regulators and clocks from the device tree, as well as parsing
4926 * arrays of unsigned integer values.
4927 *
4928 * The whole initialization process shall simply be skipped if the
4929 * minimum capability is not available.
4930 */
4931 return 0;
4932#else
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004933 struct platform_device *pdev;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004934 int err = 0;
Sidath Senanayake228451e2019-06-27 14:37:54 +02004935 unsigned int i;
4936#if defined(CONFIG_REGULATOR)
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08004937 static const char * const regulator_names[] = {
Sidath Senanayake228451e2019-06-27 14:37:54 +02004938 "mali", "shadercores"
4939 };
4940 BUILD_BUG_ON(ARRAY_SIZE(regulator_names) < BASE_MAX_NR_CLOCKS_REGULATORS);
4941#endif /* CONFIG_REGULATOR */
Sidath Senanayake823a7602016-06-29 16:03:55 +02004942
4943 if (!kbdev)
4944 return -ENODEV;
4945
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01004946 pdev = to_platform_device(kbdev->dev);
4947
Sidath Senanayake228451e2019-06-27 14:37:54 +02004948#if defined(CONFIG_REGULATOR)
4949 /* Since the error code EPROBE_DEFER causes the entire probing
4950 * procedure to be restarted from scratch at a later time,
4951 * all regulators will be released before returning.
4952 *
4953 * Any other error is ignored and the driver will continue
4954 * operating with a partial initialization of regulators.
4955 */
4956 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4957 kbdev->regulators[i] = regulator_get_optional(kbdev->dev,
4958 regulator_names[i]);
Debarshi Dutta20fff722023-06-02 13:36:22 +00004959 if (IS_ERR(kbdev->regulators[i])) {
Sidath Senanayake228451e2019-06-27 14:37:54 +02004960 err = PTR_ERR(kbdev->regulators[i]);
4961 kbdev->regulators[i] = NULL;
4962 break;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004963 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02004964 }
Sidath Senanayake228451e2019-06-27 14:37:54 +02004965 if (err == -EPROBE_DEFER) {
Jack Divere19249e2022-11-07 12:13:47 +00004966 while (i > 0)
Sidath Senanayake228451e2019-06-27 14:37:54 +02004967 regulator_put(kbdev->regulators[--i]);
4968 return err;
4969 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02004970
Sidath Senanayake228451e2019-06-27 14:37:54 +02004971 kbdev->nr_regulators = i;
4972 dev_dbg(&pdev->dev, "Regulators probed: %u\n", kbdev->nr_regulators);
4973#endif
4974
4975 /* Having more clocks than regulators is acceptable, while the
4976 * opposite shall not happen.
4977 *
4978 * Since the error code EPROBE_DEFER causes the entire probing
4979 * procedure to be restarted from scratch at a later time,
4980 * all clocks and regulators will be released before returning.
4981 *
4982 * Any other error is ignored and the driver will continue
4983 * operating with a partial initialization of clocks.
4984 */
4985 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
4986 kbdev->clocks[i] = of_clk_get(kbdev->dev->of_node, i);
Debarshi Dutta20fff722023-06-02 13:36:22 +00004987 if (IS_ERR(kbdev->clocks[i])) {
Sidath Senanayake228451e2019-06-27 14:37:54 +02004988 err = PTR_ERR(kbdev->clocks[i]);
4989 kbdev->clocks[i] = NULL;
4990 break;
Sidath Senanayake823a7602016-06-29 16:03:55 +02004991 }
Sidath Senanayake228451e2019-06-27 14:37:54 +02004992
4993 err = clk_prepare_enable(kbdev->clocks[i]);
Sidath Senanayake823a7602016-06-29 16:03:55 +02004994 if (err) {
4995 dev_err(kbdev->dev,
4996 "Failed to prepare and enable clock (%d)\n",
4997 err);
Sidath Senanayake228451e2019-06-27 14:37:54 +02004998 clk_put(kbdev->clocks[i]);
4999 break;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005000 }
5001 }
Sidath Senanayake228451e2019-06-27 14:37:54 +02005002 if (err == -EPROBE_DEFER) {
Jack Divere19249e2022-11-07 12:13:47 +00005003 while (i > 0) {
Sidath Senanayake228451e2019-06-27 14:37:54 +02005004 clk_disable_unprepare(kbdev->clocks[--i]);
5005 clk_put(kbdev->clocks[i]);
5006 }
5007 goto clocks_probe_defer;
5008 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02005009
Sidath Senanayake228451e2019-06-27 14:37:54 +02005010 kbdev->nr_clocks = i;
5011 dev_dbg(&pdev->dev, "Clocks probed: %u\n", kbdev->nr_clocks);
5012
5013 /* Any error in parsing the OPP table from the device file
5014 * shall be ignored. The fact that the table may be absent or wrong
5015 * on the device tree of the platform shouldn't prevent the driver
5016 * from completing its initialization.
5017 */
Sidath Senanayake228451e2019-06-27 14:37:54 +02005018#if defined(CONFIG_PM_OPP)
Debarshi Dutta20fff722023-06-02 13:36:22 +00005019#if defined(CONFIG_REGULATOR)
5020#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
5021 if (kbdev->nr_regulators > 0) {
5022 kbdev->token = dev_pm_opp_set_regulators(kbdev->dev, regulator_names);
5023
5024 if (kbdev->token < 0) {
5025 err = kbdev->token;
5026 goto regulators_probe_defer;
5027 }
5028
5029 }
5030#elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
Sidath Senanayake228451e2019-06-27 14:37:54 +02005031 if (kbdev->nr_regulators > 0) {
5032 kbdev->opp_table = dev_pm_opp_set_regulators(kbdev->dev,
5033 regulator_names, BASE_MAX_NR_CLOCKS_REGULATORS);
Jack Divere19249e2022-11-07 12:13:47 +00005034
Debarshi Dutta20fff722023-06-02 13:36:22 +00005035 if (IS_ERR(kbdev->opp_table)) {
Jack Divere19249e2022-11-07 12:13:47 +00005036 err = PTR_ERR(kbdev->opp_table);
5037 goto regulators_probe_defer;
5038 }
Sidath Senanayake228451e2019-06-27 14:37:54 +02005039 }
Debarshi Dutta20fff722023-06-02 13:36:22 +00005040#endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
5041#endif /* CONFIG_REGULATOR */
Sidath Senanayake228451e2019-06-27 14:37:54 +02005042 err = dev_pm_opp_of_add_table(kbdev->dev);
5043 CSTD_UNUSED(err);
5044#endif /* CONFIG_PM_OPP */
Sidath Senanayake823a7602016-06-29 16:03:55 +02005045 return 0;
5046
Jack Divere19249e2022-11-07 12:13:47 +00005047#if defined(CONFIG_PM_OPP) && \
5048 ((KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE) && defined(CONFIG_REGULATOR))
5049regulators_probe_defer:
5050 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
5051 if (kbdev->clocks[i]) {
5052 if (__clk_is_enabled(kbdev->clocks[i]))
5053 clk_disable_unprepare(kbdev->clocks[i]);
5054 clk_put(kbdev->clocks[i]);
5055 kbdev->clocks[i] = NULL;
5056 } else
5057 break;
5058 }
5059#endif
5060
Sidath Senanayake228451e2019-06-27 14:37:54 +02005061clocks_probe_defer:
5062#if defined(CONFIG_REGULATOR)
5063 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++)
5064 regulator_put(kbdev->regulators[i]);
Sidath Senanayake823a7602016-06-29 16:03:55 +02005065#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02005066 return err;
Sidath Senanayake97483052021-01-29 15:03:53 +00005067#endif /* CONFIG_OF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02005068}
5069
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005070void power_control_term(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005071{
Sidath Senanayake228451e2019-06-27 14:37:54 +02005072 unsigned int i;
5073
Sidath Senanayake228451e2019-06-27 14:37:54 +02005074#if defined(CONFIG_PM_OPP)
5075 dev_pm_opp_of_remove_table(kbdev->dev);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005076#if defined(CONFIG_REGULATOR)
5077#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
5078 if (kbdev->token > -EPERM)
5079 dev_pm_opp_put_regulators(kbdev->token);
5080#elif (KERNEL_VERSION(4, 10, 0) <= LINUX_VERSION_CODE)
Sidath Senanayake228451e2019-06-27 14:37:54 +02005081 if (!IS_ERR_OR_NULL(kbdev->opp_table))
5082 dev_pm_opp_put_regulators(kbdev->opp_table);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005083#endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
5084#endif /* CONFIG_REGULATOR */
Sidath Senanayake228451e2019-06-27 14:37:54 +02005085#endif /* CONFIG_PM_OPP */
5086
Sidath Senanayake228451e2019-06-27 14:37:54 +02005087 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
5088 if (kbdev->clocks[i]) {
5089 if (__clk_is_enabled(kbdev->clocks[i]))
5090 clk_disable_unprepare(kbdev->clocks[i]);
5091 clk_put(kbdev->clocks[i]);
5092 kbdev->clocks[i] = NULL;
5093 } else
5094 break;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005095 }
5096
Sidath Senanayake97483052021-01-29 15:03:53 +00005097#if defined(CONFIG_OF) && defined(CONFIG_REGULATOR)
Sidath Senanayake228451e2019-06-27 14:37:54 +02005098 for (i = 0; i < BASE_MAX_NR_CLOCKS_REGULATORS; i++) {
5099 if (kbdev->regulators[i]) {
5100 regulator_put(kbdev->regulators[i]);
5101 kbdev->regulators[i] = NULL;
5102 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02005103 }
Sidath Senanayake97483052021-01-29 15:03:53 +00005104#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02005105}
5106
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005107#if IS_ENABLED(CONFIG_DEBUG_FS)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005108
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005109static void trigger_reset(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005110{
5111 kbase_pm_context_active(kbdev);
Sidath Senanayakefca86132021-06-15 13:39:30 +01005112 if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
Sidath Senanayake823a7602016-06-29 16:03:55 +02005113 kbase_reset_gpu(kbdev);
5114 kbase_pm_context_idle(kbdev);
5115}
5116
5117#define MAKE_QUIRK_ACCESSORS(type) \
5118static int type##_quirks_set(void *data, u64 val) \
5119{ \
5120 struct kbase_device *kbdev; \
5121 kbdev = (struct kbase_device *)data; \
5122 kbdev->hw_quirks_##type = (u32)val; \
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005123 trigger_reset(kbdev); \
Jack Divere19249e2022-11-07 12:13:47 +00005124 return 0; \
Sidath Senanayake823a7602016-06-29 16:03:55 +02005125} \
5126\
5127static int type##_quirks_get(void *data, u64 *val) \
5128{ \
Jack Divere19249e2022-11-07 12:13:47 +00005129 struct kbase_device *kbdev; \
5130 kbdev = (struct kbase_device *)data; \
5131 *val = kbdev->hw_quirks_##type; \
5132 return 0; \
Sidath Senanayake823a7602016-06-29 16:03:55 +02005133} \
Jack Divere19249e2022-11-07 12:13:47 +00005134DEFINE_DEBUGFS_ATTRIBUTE(fops_##type##_quirks, type##_quirks_get, \
5135 type##_quirks_set, "%llu\n")
Sidath Senanayake823a7602016-06-29 16:03:55 +02005136
5137MAKE_QUIRK_ACCESSORS(sc);
5138MAKE_QUIRK_ACCESSORS(tiler);
5139MAKE_QUIRK_ACCESSORS(mmu);
Sidath Senanayakefca86132021-06-15 13:39:30 +01005140MAKE_QUIRK_ACCESSORS(gpu);
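
/*
 * For reference, a sketch of what one MAKE_QUIRK_ACCESSORS() invocation above
 * expands to (shown for the "sc" case; the other invocations follow the same
 * pattern, and the resulting fops_*_quirks are attached to the quirks_*
 * debugfs files created in init_debugfs() below):
 *
 *   static int sc_quirks_set(void *data, u64 val)
 *   {
 *           struct kbase_device *kbdev = (struct kbase_device *)data;
 *
 *           kbdev->hw_quirks_sc = (u32)val;
 *           trigger_reset(kbdev);
 *           return 0;
 *   }
 *
 *   static int sc_quirks_get(void *data, u64 *val)
 *   {
 *           struct kbase_device *kbdev = (struct kbase_device *)data;
 *
 *           *val = kbdev->hw_quirks_sc;
 *           return 0;
 *   }
 *
 *   DEFINE_DEBUGFS_ATTRIBUTE(fops_sc_quirks, sc_quirks_get, sc_quirks_set,
 *                            "%llu\n");
 */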
Sidath Senanayake823a7602016-06-29 16:03:55 +02005141
Jesse Hall0c596dc2021-11-23 14:38:46 -08005142/**
5143 * kbase_device_debugfs_reset_write() - Reset the GPU
5144 *
5145 * @data: Pointer to the Kbase device.
5146 * @wait_for_reset: Value written to the file.
5147 *
5148 * This function will perform the GPU reset, and if the value written to
5149 * the file is 1 it will also wait for the reset to complete.
5150 *
5151 * Return: 0 in case of no error otherwise a negative value.
5152 */
5153static int kbase_device_debugfs_reset_write(void *data, u64 wait_for_reset)
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005154{
Jesse Hall0c596dc2021-11-23 14:38:46 -08005155 struct kbase_device *kbdev = data;
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005156
5157 trigger_reset(kbdev);
5158
Jesse Hall0c596dc2021-11-23 14:38:46 -08005159 if (wait_for_reset == 1)
5160 return kbase_reset_gpu_wait(kbdev);
5161
5162 return 0;
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005163}
5164
Jack Divere19249e2022-11-07 12:13:47 +00005165DEFINE_DEBUGFS_ATTRIBUTE(fops_trigger_reset, NULL, &kbase_device_debugfs_reset_write, "%llu\n");
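
/*
 * Usage sketch for the "reset" debugfs file created in init_debugfs() below.
 * The debugfs mount point and device name (here "mali0") are illustrative and
 * depend on the system:
 *
 *   echo 0 > /sys/kernel/debug/mali0/reset   # trigger a reset, return at once
 *   echo 1 > /sys/kernel/debug/mali0/reset   # trigger a reset and wait for it
 */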
Sidath Senanayake823a7602016-06-29 16:03:55 +02005166
Sidath Senanayake92327782016-11-09 14:53:08 +01005167/**
Varad Gautam96dd6b02023-03-24 12:46:23 +00005168 * kbase_device_debugfs_trigger_uevent_write - send a GPU uevent
5169 * @file: File object to write to
5170 * @ubuf: User buffer to read data from
5171 * @count: Length of user buffer
5172 * @ppos: Offset within file object
5173 *
5174 * Return: @count on success, or a negative error code on failure.
5175 */
5176static ssize_t kbase_device_debugfs_trigger_uevent_write(struct file *file,
5177 const char __user *ubuf, size_t count, loff_t *ppos)
5178{
5179 struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
5180 struct gpu_uevent evt = { 0 };
5181 char str[8] = { 0 };
5182
5183 if (count >= sizeof(str))
5184 return -EINVAL;
5185
5186 if (copy_from_user(str, ubuf, count))
5187 return -EINVAL;
5188
5189 str[count] = '\0';
5190
5191 if (sscanf(str, "%u %u", &evt.type, &evt.info) != 2)
5192 return -EINVAL;
5193
5194 pixel_gpu_uevent_send(kbdev, (const struct gpu_uevent *) &evt);
5195
5196 return count;
5197}
5198
5199static const struct file_operations fops_trigger_uevent = {
5200 .owner = THIS_MODULE,
5201 .open = simple_open,
5202 .write = kbase_device_debugfs_trigger_uevent_write,
5203 .llseek = default_llseek,
5204};
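
/*
 * Usage sketch for the "trigger_uevent" debugfs file created in init_debugfs()
 * below. The file expects two unsigned integers, the event type followed by
 * the event info, matching the "%u %u" parsing above (path and values are
 * illustrative):
 *
 *   echo "1 0" > /sys/kernel/debug/mali0/trigger_uevent
 */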
5205
5206/**
Sidath Senanayake92327782016-11-09 14:53:08 +01005207 * debugfs_protected_debug_mode_read - "protected_debug_mode" debugfs read
5208 * @file: File object to read is for
5209 * @buf: User buffer to populate with data
5210 * @len: Length of user buffer
5211 * @ppos: Offset within file object
5212 *
5213 * Retrieves the current status of protected debug mode
5214 * (0 = disabled, 1 = enabled)
5215 *
5216 * Return: Number of bytes added to user buffer
5217 */
5218static ssize_t debugfs_protected_debug_mode_read(struct file *file,
5219 char __user *buf, size_t len, loff_t *ppos)
5220{
5221 struct kbase_device *kbdev = (struct kbase_device *)file->private_data;
5222 u32 gpu_status;
5223 ssize_t ret_val;
5224
5225 kbase_pm_context_active(kbdev);
Sidath Senanayakef32af5a2018-07-31 15:28:14 +02005226 gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
Sidath Senanayake92327782016-11-09 14:53:08 +01005227 kbase_pm_context_idle(kbdev);
5228
5229 if (gpu_status & GPU_DBGEN)
5230 ret_val = simple_read_from_buffer(buf, len, ppos, "1\n", 2);
5231 else
5232 ret_val = simple_read_from_buffer(buf, len, ppos, "0\n", 2);
5233
5234 return ret_val;
5235}
5236
5237/*
5238 * struct fops_protected_debug_mode - "protected_debug_mode" debugfs fops
5239 *
5240 * Contains the file operations for the "protected_debug_mode" debugfs file
5241 */
5242static const struct file_operations fops_protected_debug_mode = {
Sidath Senanayake228451e2019-06-27 14:37:54 +02005243 .owner = THIS_MODULE,
Sidath Senanayake92327782016-11-09 14:53:08 +01005244 .open = simple_open,
5245 .read = debugfs_protected_debug_mode_read,
5246 .llseek = default_llseek,
5247};
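
/*
 * Usage sketch (path illustrative): reading "protected_debug_mode" reports
 * whether the GPU_DBGEN bit is currently set in GPU_STATUS:
 *
 *   cat /sys/kernel/debug/mali0/protected_debug_mode
 *   0    <- protected debug mode disabled, 1 when enabled
 */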
5248
Sidath Senanayakee972f652019-04-10 14:37:00 +02005249static int kbase_device_debugfs_mem_pool_max_size_show(struct seq_file *sfile,
5250 void *data)
5251{
5252 CSTD_UNUSED(data);
5253 return kbase_debugfs_helper_seq_read(sfile,
5254 MEMORY_GROUP_MANAGER_NR_GROUPS,
5255 kbase_mem_pool_config_debugfs_max_size);
5256}
5257
5258static ssize_t kbase_device_debugfs_mem_pool_max_size_write(struct file *file,
5259 const char __user *ubuf, size_t count, loff_t *ppos)
5260{
5261 int err = 0;
5262
5263 CSTD_UNUSED(ppos);
5264 err = kbase_debugfs_helper_seq_write(file, ubuf, count,
5265 MEMORY_GROUP_MANAGER_NR_GROUPS,
5266 kbase_mem_pool_config_debugfs_set_max_size);
5267
5268 return err ? err : count;
5269}
5270
5271static int kbase_device_debugfs_mem_pool_max_size_open(struct inode *in,
5272 struct file *file)
5273{
5274 return single_open(file, kbase_device_debugfs_mem_pool_max_size_show,
5275 in->i_private);
5276}
5277
5278static const struct file_operations
5279 kbase_device_debugfs_mem_pool_max_size_fops = {
5280 .owner = THIS_MODULE,
5281 .open = kbase_device_debugfs_mem_pool_max_size_open,
5282 .read = seq_read,
5283 .write = kbase_device_debugfs_mem_pool_max_size_write,
5284 .llseek = seq_lseek,
5285 .release = single_release,
5286};
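
/*
 * These fops back the "mem_pool_max_size" and "lp_mem_pool_max_size" files in
 * the ctx/defaults debugfs directory created below. Each file holds one
 * maximum-size value per memory group (MEMORY_GROUP_MANAGER_NR_GROUPS in
 * total); the exact text format accepted on write is defined by
 * kbase_debugfs_helper_seq_write(). Read sketch (path illustrative):
 *
 *   cat /sys/kernel/debug/mali0/ctx/defaults/mem_pool_max_size
 */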
5287
Debarshi Dutta20fff722023-06-02 13:36:22 +00005288/**
5289 * debugfs_ctx_defaults_init - Create the default configuration of new contexts in debugfs
5290 * @kbdev: An instance of the GPU platform device, allocated from the probe method of the driver.
5291 * Return: A pointer to the last dentry that it tried to create, whether successful or not.
5292 * Could be NULL or encode another error value.
5293 */
5294static struct dentry *debugfs_ctx_defaults_init(struct kbase_device *const kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005295{
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005296	/* Prevent unprivileged use of the debug file system
5297	 * on old kernel versions.
5298 */
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005299 const mode_t mode = 0644;
Debarshi Dutta20fff722023-06-02 13:36:22 +00005300 struct dentry *dentry = debugfs_create_dir("defaults", kbdev->debugfs_ctx_directory);
5301 struct dentry *debugfs_ctx_defaults_directory = dentry;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005302
Debarshi Dutta20fff722023-06-02 13:36:22 +00005303 if (IS_ERR_OR_NULL(dentry)) {
5304 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx defaults directory\n");
5305 return dentry;
5306 }
5307
5308 debugfs_create_bool("infinite_cache", mode,
5309 debugfs_ctx_defaults_directory,
5310 &kbdev->infinite_cache_active_default);
5311
5312 dentry = debugfs_create_file("mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
5313 &kbdev->mem_pool_defaults.small,
5314 &kbase_device_debugfs_mem_pool_max_size_fops);
5315 if (IS_ERR_OR_NULL(dentry)) {
5316 dev_err(kbdev->dev, "Unable to create mem_pool_max_size debugfs entry\n");
5317 return dentry;
5318 }
5319
5320 dentry = debugfs_create_file("lp_mem_pool_max_size", mode, debugfs_ctx_defaults_directory,
5321 &kbdev->mem_pool_defaults.large,
5322 &kbase_device_debugfs_mem_pool_max_size_fops);
5323 if (IS_ERR_OR_NULL(dentry))
5324 dev_err(kbdev->dev, "Unable to create lp_mem_pool_max_size debugfs entry\n");
5325
5326 return dentry;
5327}
5328
5329/**
5330 * init_debugfs - Create device-wide debugfs directories and files for the Mali driver
5331 * @kbdev: An instance of the GPU platform device, allocated from the probe method of the driver.
5332 * Return: A pointer to the last dentry that it tried to create, whether successful or not.
5333 * Could be NULL or encode another error value.
5334 */
5335static struct dentry *init_debugfs(struct kbase_device *kbdev)
5336{
5337 struct dentry *dentry = debugfs_create_dir(kbdev->devname, NULL);
5338
5339 kbdev->mali_debugfs_directory = dentry;
5340 if (IS_ERR_OR_NULL(dentry)) {
Sidath Senanayakefca86132021-06-15 13:39:30 +01005341 dev_err(kbdev->dev,
5342 "Couldn't create mali debugfs directory: %s\n",
5343 kbdev->devname);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005344 return dentry;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005345 }
5346
Debarshi Dutta20fff722023-06-02 13:36:22 +00005347 dentry = debugfs_create_dir("ctx", kbdev->mali_debugfs_directory);
5348 kbdev->debugfs_ctx_directory = dentry;
5349 if (IS_ERR_OR_NULL(dentry)) {
Sidath Senanayake823a7602016-06-29 16:03:55 +02005350 dev_err(kbdev->dev, "Couldn't create mali debugfs ctx directory\n");
Debarshi Dutta20fff722023-06-02 13:36:22 +00005351 return dentry;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005352 }
5353
Debarshi Dutta20fff722023-06-02 13:36:22 +00005354 dentry = debugfs_create_dir("instrumentation", kbdev->mali_debugfs_directory);
5355 kbdev->debugfs_instr_directory = dentry;
5356 if (IS_ERR_OR_NULL(dentry)) {
Sidath Senanayake72f24572020-10-27 11:38:49 +00005357 dev_err(kbdev->dev, "Couldn't create mali debugfs instrumentation directory\n");
Debarshi Dutta20fff722023-06-02 13:36:22 +00005358 return dentry;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005359 }
5360
Sidath Senanayake92327782016-11-09 14:53:08 +01005361 kbasep_regs_history_debugfs_init(kbdev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02005362
Debarshi Dutta20fff722023-06-02 13:36:22 +00005363#if MALI_USE_CSF
5364 kbase_debug_csf_fault_debugfs_init(kbdev);
5365#else /* MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02005366 kbase_debug_job_fault_debugfs_init(kbdev);
Sidath Senanayake72f24572020-10-27 11:38:49 +00005367#endif /* !MALI_USE_CSF */
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005368
Sidath Senanayake823a7602016-06-29 16:03:55 +02005369 kbasep_gpu_memory_debugfs_init(kbdev);
Sidath Senanayake192bd792016-11-09 14:14:45 +01005370 kbase_as_fault_debugfs_init(kbdev);
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00005371#ifdef CONFIG_MALI_PRFCNT_SET_SELECT_VIA_DEBUG_FS
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02005372 kbase_instr_backend_debugfs_init(kbdev);
5373#endif
Jesse Hall0c596dc2021-11-23 14:38:46 -08005374 kbase_pbha_debugfs_init(kbdev);
5375
Sidath Senanayake48f35542017-03-31 14:00:22 +02005376 /* fops_* variables created by invocations of macro
Sidath Senanayake97483052021-01-29 15:03:53 +00005377 * MAKE_QUIRK_ACCESSORS() above.
5378 */
Debarshi Dutta20fff722023-06-02 13:36:22 +00005379 dentry = debugfs_create_file("quirks_sc", 0644,
Sidath Senanayake823a7602016-06-29 16:03:55 +02005380 kbdev->mali_debugfs_directory, kbdev,
5381 &fops_sc_quirks);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005382 if (IS_ERR_OR_NULL(dentry)) {
5383 dev_err(kbdev->dev, "Unable to create quirks_sc debugfs entry\n");
5384 return dentry;
Sidath Senanayake92327782016-11-09 14:53:08 +01005385 }
5386
Debarshi Dutta20fff722023-06-02 13:36:22 +00005387 dentry = debugfs_create_file("quirks_tiler", 0644,
5388 kbdev->mali_debugfs_directory, kbdev,
5389 &fops_tiler_quirks);
5390 if (IS_ERR_OR_NULL(dentry)) {
5391 dev_err(kbdev->dev, "Unable to create quirks_tiler debugfs entry\n");
5392 return dentry;
5393 }
5394
5395 dentry = debugfs_create_file("quirks_mmu", 0644,
5396 kbdev->mali_debugfs_directory, kbdev,
5397 &fops_mmu_quirks);
5398 if (IS_ERR_OR_NULL(dentry)) {
5399 dev_err(kbdev->dev, "Unable to create quirks_mmu debugfs entry\n");
5400 return dentry;
5401 }
5402
5403 dentry = debugfs_create_file("quirks_gpu", 0644, kbdev->mali_debugfs_directory,
5404 kbdev, &fops_gpu_quirks);
5405 if (IS_ERR_OR_NULL(dentry)) {
5406 dev_err(kbdev->dev, "Unable to create quirks_gpu debugfs entry\n");
5407 return dentry;
5408 }
5409
5410 dentry = debugfs_ctx_defaults_init(kbdev);
5411 if (IS_ERR_OR_NULL(dentry))
5412 return dentry;
5413
5414 if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
5415 dentry = debugfs_create_file("protected_debug_mode", 0444,
5416 kbdev->mali_debugfs_directory, kbdev,
5417 &fops_protected_debug_mode);
5418 if (IS_ERR_OR_NULL(dentry)) {
5419 dev_err(kbdev->dev, "Unable to create protected_debug_mode debugfs entry\n");
5420 return dentry;
5421 }
5422 }
5423
5424 dentry = debugfs_create_file("reset", 0644,
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005425 kbdev->mali_debugfs_directory, kbdev,
5426 &fops_trigger_reset);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005427 if (IS_ERR_OR_NULL(dentry)) {
5428 dev_err(kbdev->dev, "Unable to create reset debugfs entry\n");
5429 return dentry;
5430 }
Sidath Senanayakeb2b17642020-02-27 15:37:17 +01005431
Varad Gautam96dd6b02023-03-24 12:46:23 +00005432 debugfs_create_file("trigger_uevent", 0644,
5433 kbdev->mali_debugfs_directory, kbdev,
5434 &fops_trigger_uevent);
5435
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02005436 kbase_ktrace_debugfs_init(kbdev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02005437
Sidath Senanayake48f35542017-03-31 14:00:22 +02005438#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005439#if IS_ENABLED(CONFIG_DEVFREQ_THERMAL)
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005440 if (kbdev->devfreq)
Sidath Senanayake48f35542017-03-31 14:00:22 +02005441 kbase_ipa_debugfs_init(kbdev);
5442#endif /* CONFIG_DEVFREQ_THERMAL */
5443#endif /* CONFIG_MALI_DEVFREQ */
5444
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005445#if !MALI_USE_CSF
Debarshi Dutta20fff722023-06-02 13:36:22 +00005446 dentry = debugfs_create_file("serialize_jobs", 0644,
Sidath Senanayake5f133f22016-11-22 10:00:32 +01005447 kbdev->mali_debugfs_directory, kbdev,
5448 &kbasep_serialize_jobs_debugfs_fops);
Debarshi Dutta20fff722023-06-02 13:36:22 +00005449 if (IS_ERR_OR_NULL(dentry)) {
5450 dev_err(kbdev->dev, "Unable to create serialize_jobs debugfs entry\n");
5451 return dentry;
5452 }
Jack Divere19249e2022-11-07 12:13:47 +00005453 kbase_timeline_io_debugfs_init(kbdev);
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005454#endif
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00005455 kbase_dvfs_status_debugfs_init(kbdev);
5456
Jack Divere19249e2022-11-07 12:13:47 +00005457
Debarshi Dutta20fff722023-06-02 13:36:22 +00005458 return dentry;
5459}
Sidath Senanayake823a7602016-06-29 16:03:55 +02005460
Debarshi Dutta20fff722023-06-02 13:36:22 +00005461int kbase_device_debugfs_init(struct kbase_device *kbdev)
5462{
5463 struct dentry *dentry = init_debugfs(kbdev);
5464
5465 if (IS_ERR_OR_NULL(dentry)) {
5466 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
5467 return IS_ERR(dentry) ? PTR_ERR(dentry) : -ENOMEM;
5468 }
5469 return 0;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005470}
5471
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005472void kbase_device_debugfs_term(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005473{
5474 debugfs_remove_recursive(kbdev->mali_debugfs_directory);
5475}
Sidath Senanayake823a7602016-06-29 16:03:55 +02005476#endif /* CONFIG_DEBUG_FS */
5477
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005478int kbase_device_coherency_init(struct kbase_device *kbdev)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005479{
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005480#if IS_ENABLED(CONFIG_OF)
Sidath Senanayake823a7602016-06-29 16:03:55 +02005481 u32 supported_coherency_bitmap =
5482 kbdev->gpu_props.props.raw_props.coherency_mode;
5483 const void *coherency_override_dts;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005484 bool dma_coherent;
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005485 u32 override_coherency, gpu_id;
5486 unsigned int prod_id;
5487
5488 gpu_id = kbdev->gpu_props.props.raw_props.gpu_id;
5489 gpu_id &= GPU_ID_VERSION_PRODUCT_ID;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005490 prod_id = gpu_id >> KBASE_GPU_ID_VERSION_PRODUCT_ID_SHIFT;
Sidath Senanayake44e8be92017-01-24 10:48:35 +01005491
5492 /* Only for tMIx :
5493 * (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
5494 * documented for tMIx so force correct value here.
5495 */
Sidath Senanayake86966062019-08-23 15:40:27 +02005496 if (GPU_ID2_MODEL_MATCH_VALUE(prod_id) ==
5497 GPU_ID2_PRODUCT_TMIX)
Sidath Senanayake44e8be92017-01-24 10:48:35 +01005498 if (supported_coherency_bitmap ==
5499 COHERENCY_FEATURE_BIT(COHERENCY_ACE))
5500 supported_coherency_bitmap |=
5501 COHERENCY_FEATURE_BIT(COHERENCY_ACE_LITE);
5502
Sidath Senanayake823a7602016-06-29 16:03:55 +02005503#endif /* CONFIG_OF */
5504
5505 kbdev->system_coherency = COHERENCY_NONE;
5506
5507 /* device tree may override the coherency */
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005508#if IS_ENABLED(CONFIG_OF)
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005509	/* treat the "dma-coherent" property as a synonym for ACE-lite */
5510 dma_coherent = of_dma_is_coherent(kbdev->dev->of_node);
Sidath Senanayake823a7602016-06-29 16:03:55 +02005511 coherency_override_dts = of_get_property(kbdev->dev->of_node,
5512 "system-coherency",
5513 NULL);
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005514 if (coherency_override_dts || dma_coherent) {
5515 if (coherency_override_dts) {
5516 override_coherency = be32_to_cpup(coherency_override_dts);
5517 if (dma_coherent && override_coherency != COHERENCY_ACE_LITE) {
5518 dev_err(kbdev->dev,
5519 "system-coherency needs to be 0 when dma-coherent is set\n");
5520 return -EINVAL;
5521 }
5522 } else {
5523 /* dma-coherent set and system-coherency not specified */
5524 override_coherency = COHERENCY_ACE_LITE;
5525 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02005526
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01005527#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
Sidath Senanayake8037b532021-04-14 19:14:30 +01005528 /* ACE coherency mode is not supported by Driver on CSF GPUs.
5529 * Return an error to signal the invalid device tree configuration.
5530 */
5531 if (override_coherency == COHERENCY_ACE) {
5532 dev_err(kbdev->dev,
5533 "ACE coherency not supported, wrong DT configuration");
5534 return -EINVAL;
5535 }
5536#endif
5537
Sidath Senanayake823a7602016-06-29 16:03:55 +02005538 if ((override_coherency <= COHERENCY_NONE) &&
5539 (supported_coherency_bitmap &
5540 COHERENCY_FEATURE_BIT(override_coherency))) {
5541
5542 kbdev->system_coherency = override_coherency;
5543
5544 dev_info(kbdev->dev,
5545 "Using coherency mode %u set from dtb",
5546 override_coherency);
5547 } else
5548 dev_warn(kbdev->dev,
5549 "Ignoring unsupported coherency mode %u set from dtb",
5550 override_coherency);
5551 }
5552
5553#endif /* CONFIG_OF */
5554
5555 kbdev->gpu_props.props.raw_props.coherency_mode =
5556 kbdev->system_coherency;
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01005557
5558 return 0;
Sidath Senanayake823a7602016-06-29 16:03:55 +02005559}
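
/*
 * Device tree sketch for the coherency selection above (illustrative only;
 * node name, unit address and the other properties a real platform needs are
 * omitted). Either mark the GPU node dma-coherent, which is treated as
 * ACE-Lite, or set "system-coherency" explicitly; the chosen mode must be
 * reported as supported by the GPU, and must be 0 (ACE-Lite) whenever
 * "dma-coherent" is also present:
 *
 *   gpu@... {
 *           compatible = "arm,mali-midgard";
 *           dma-coherent;
 *   };
 */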
5560
Sidath Senanayake823a7602016-06-29 16:03:55 +02005561
Sidath Senanayake97483052021-01-29 15:03:53 +00005562#if MALI_USE_CSF
5563/**
5564 * csg_scheduling_period_store - Store callback for the csg_scheduling_period
5565 * sysfs file.
5566 * @dev: The device this sysfs file is for
5567 * @attr: The attributes of the sysfs file
5568 * @buf: The value written to the sysfs file
5569 * @count: The number of bytes written to the sysfs file
5570 *
5571 * This function is called when the csg_scheduling_period sysfs file is written
5572 * to. It checks the data written, and if valid updates the CSG scheduling period.
5573 *
5574 * Return: @count if the function succeeded. An error code on failure.
5575 */
5576static ssize_t csg_scheduling_period_store(struct device *dev,
5577 struct device_attribute *attr,
5578 const char *buf, size_t count)
5579{
5580 struct kbase_device *kbdev;
5581 int ret;
5582 unsigned int csg_scheduling_period;
5583
5584 kbdev = to_kbase_device(dev);
5585 if (!kbdev)
5586 return -ENODEV;
5587
5588 ret = kstrtouint(buf, 0, &csg_scheduling_period);
5589 if (ret || csg_scheduling_period == 0) {
5590 dev_err(kbdev->dev,
5591 "Couldn't process csg_scheduling_period write operation.\n"
5592 "Use format 'csg_scheduling_period_ms', and csg_scheduling_period_ms > 0\n");
5593 return -EINVAL;
5594 }
5595
5596 kbase_csf_scheduler_lock(kbdev);
5597 kbdev->csf.scheduler.csg_scheduling_period_ms = csg_scheduling_period;
5598 dev_dbg(kbdev->dev, "CSG scheduling period: %ums\n",
5599 csg_scheduling_period);
5600 kbase_csf_scheduler_unlock(kbdev);
5601
5602 return count;
5603}
5604
5605/**
5606 * csg_scheduling_period_show - Show callback for the csg_scheduling_period
5607 * sysfs entry.
5608 * @dev: The device this sysfs file is for.
5609 * @attr: The attributes of the sysfs file.
5610 * @buf: The output buffer to receive the GPU information.
5611 *
5612 * This function is called to get the current CSG scheduling period.
5613 *
5614 * Return: The number of bytes output to @buf.
5615 */
5616static ssize_t csg_scheduling_period_show(struct device *dev,
5617 struct device_attribute *attr,
5618 char *const buf)
5619{
5620 struct kbase_device *kbdev;
5621 ssize_t ret;
5622
5623 kbdev = to_kbase_device(dev);
5624 if (!kbdev)
5625 return -ENODEV;
5626
5627 ret = scnprintf(buf, PAGE_SIZE, "%u\n",
5628 kbdev->csf.scheduler.csg_scheduling_period_ms);
5629
5630 return ret;
5631}
5632
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005633static DEVICE_ATTR_RW(csg_scheduling_period);
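
/*
 * Usage sketch for the csg_scheduling_period sysfs file (the sysfs path
 * depends on how the platform device is named; shown abbreviated here). The
 * value is in milliseconds and must be greater than zero:
 *
 *   echo 100 > /sys/devices/platform/.../csg_scheduling_period
 *   cat /sys/devices/platform/.../csg_scheduling_period
 */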
Sidath Senanayake97483052021-01-29 15:03:53 +00005634
5635/**
5636 * fw_timeout_store - Store callback for the fw_timeout sysfs file.
5637 * @dev: The device this sysfs file is for
5638 * @attr: The attributes of the sysfs file
5639 * @buf: The value written to the sysfs file
5640 * @count: The number of bytes written to the sysfs file
5641 *
5642 * This function is called when the fw_timeout sysfs file is written to. It
5643 * checks the data written, and if valid updates the firmware timeout.
5644 *
5645 * Return: @count if the function succeeded. An error code on failure.
5646 */
5647static ssize_t fw_timeout_store(struct device *dev,
5648 struct device_attribute *attr, const char *buf,
5649 size_t count)
5650{
5651 struct kbase_device *kbdev;
5652 int ret;
5653 unsigned int fw_timeout;
5654
5655 kbdev = to_kbase_device(dev);
5656 if (!kbdev)
5657 return -ENODEV;
5658
5659 ret = kstrtouint(buf, 0, &fw_timeout);
5660 if (ret || fw_timeout == 0) {
Jack Divere19249e2022-11-07 12:13:47 +00005661 dev_err(kbdev->dev,
5662 "Couldn't process fw_timeout write operation.\n"
5663 "Use format 'fw_timeout_ms', and fw_timeout_ms > 0\n"
5664 "Default fw_timeout: %u",
5665 kbase_get_timeout_ms(kbdev, CSF_FIRMWARE_PING_TIMEOUT));
Sidath Senanayake97483052021-01-29 15:03:53 +00005666 return -EINVAL;
5667 }
5668
5669 kbase_csf_scheduler_lock(kbdev);
5670 kbdev->csf.fw_timeout_ms = fw_timeout;
5671 kbase_csf_scheduler_unlock(kbdev);
5672 dev_dbg(kbdev->dev, "Firmware timeout: %ums\n", fw_timeout);
5673
5674 return count;
5675}
5676
5677/**
5678 * fw_timeout_show - Show callback for the firmware timeout sysfs entry.
5679 * @dev: The device this sysfs file is for.
5680 * @attr: The attributes of the sysfs file.
5681 * @buf: The output buffer to receive the GPU information.
5682 *
5683 * This function is called to get the current firmware timeout.
5684 *
5685 * Return: The number of bytes output to @buf.
5686 */
5687static ssize_t fw_timeout_show(struct device *dev,
5688 struct device_attribute *attr, char *const buf)
5689{
5690 struct kbase_device *kbdev;
5691 ssize_t ret;
5692
5693 kbdev = to_kbase_device(dev);
5694 if (!kbdev)
5695 return -ENODEV;
5696
5697 ret = scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->csf.fw_timeout_ms);
5698
5699 return ret;
5700}
5701
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005702static DEVICE_ATTR_RW(fw_timeout);
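
/*
 * Usage sketch for the fw_timeout sysfs file (path abbreviated). The value is
 * the firmware timeout in milliseconds and must be greater than zero:
 *
 *   echo 500 > /sys/devices/platform/.../fw_timeout
 */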
5703
5704/**
5705 * idle_hysteresis_time_store - Store callback for CSF idle_hysteresis_time
5706 * sysfs file.
5707 * @dev: The device this sysfs file is for
5708 * @attr: The attributes of the sysfs file
5709 * @buf: The value written to the sysfs file
5710 * @count: The number of bytes written to the sysfs file
5711 *
5712 * This function is called when the idle_hysteresis_time sysfs file is
5713 * written to.
5714 *
5715 * This file contains the idle hysteresis duration in microseconds.
5716 *
5717 * Return: @count if the function succeeded. An error code on failure.
5718 */
5719static ssize_t idle_hysteresis_time_store(struct device *dev,
5720 struct device_attribute *attr, const char *buf, size_t count)
5721{
5722 struct kbase_device *kbdev;
5723 u32 dur = 0;
5724
5725 kbdev = to_kbase_device(dev);
5726 if (!kbdev)
5727 return -ENODEV;
5728
5729 if (kstrtou32(buf, 0, &dur)) {
5730 dev_err(kbdev->dev, "Couldn't process idle_hysteresis_time write operation.\n"
5731 "Use format <idle_hysteresis_time>\n");
5732 return -EINVAL;
5733 }
5734
Jörg Wagnere61eb932023-08-31 17:27:24 +00005735	/* The value written via sysfs is in microseconds, but the firmware
5736	 * interface takes nanoseconds, so multiply by NSEC_PER_USEC.
5737	 */
5738 kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur * NSEC_PER_USEC);
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005739
5740 return count;
5741}
5742
5743/**
5744 * idle_hysteresis_time_show - Show callback for CSF idle_hysteresis_time
5745 * sysfs entry.
5746 * @dev: The device this sysfs file is for.
5747 * @attr: The attributes of the sysfs file.
5748 * @buf: The output buffer to receive the GPU information.
5749 *
5750 * This function is called to get the current idle hysteresis duration in us.
5751 *
5752 * Return: The number of bytes output to @buf.
5753 */
5754static ssize_t idle_hysteresis_time_show(struct device *dev,
5755 struct device_attribute *attr, char * const buf)
5756{
5757 struct kbase_device *kbdev;
5758 ssize_t ret;
5759 u32 dur;
5760
5761 kbdev = to_kbase_device(dev);
5762 if (!kbdev)
5763 return -ENODEV;
5764
Jörg Wagnere61eb932023-08-31 17:27:24 +00005765	/* The value reported via sysfs is in microseconds, so divide the nanosecond value by NSEC_PER_USEC. */
5766 dur = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev) / NSEC_PER_USEC;
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08005767 ret = scnprintf(buf, PAGE_SIZE, "%u\n", dur);
5768
5769 return ret;
5770}
5771
5772static DEVICE_ATTR_RW(idle_hysteresis_time);
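
/*
 * Usage sketch for the idle_hysteresis_time sysfs file (path abbreviated).
 * Values are exchanged in microseconds; the driver converts to and from the
 * nanosecond value used by the firmware interface:
 *
 *   echo 10000 > /sys/devices/platform/.../idle_hysteresis_time   # 10 ms
 *   cat /sys/devices/platform/.../idle_hysteresis_time
 */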
Jack Divere19249e2022-11-07 12:13:47 +00005773
5774/**
Jörg Wagnere61eb932023-08-31 17:27:24 +00005775 * idle_hysteresis_time_ns_store - Store callback for CSF
5776 * idle_hysteresis_time_ns sysfs file.
5777 *
5778 * @dev: The device this sysfs file is for
5779 * @attr: The attributes of the sysfs file
5780 * @buf: The value written to the sysfs file
5781 * @count: The number of bytes written to the sysfs file
5782 *
5783 * This function is called when the idle_hysteresis_time_ns sysfs
5784 * file is written to.
5785 *
5786 * This file contains values of the idle hysteresis duration in ns.
5787 *
5788 * Return: @count if the function succeeded. An error code on failure.
5789 */
5790static ssize_t idle_hysteresis_time_ns_store(struct device *dev, struct device_attribute *attr,
5791 const char *buf, size_t count)
5792{
5793 struct kbase_device *kbdev;
5794 u32 dur = 0;
5795
5796 kbdev = to_kbase_device(dev);
5797 if (!kbdev)
5798 return -ENODEV;
5799
5800 if (kstrtou32(buf, 0, &dur)) {
5801 dev_err(kbdev->dev, "Couldn't process idle_hysteresis_time_ns write operation.\n"
5802 "Use format <idle_hysteresis_time_ns>\n");
5803 return -EINVAL;
5804 }
5805
5806 kbase_csf_firmware_set_gpu_idle_hysteresis_time(kbdev, dur);
5807
5808 return count;
5809}
5810
5811/**
5812 * idle_hysteresis_time_ns_show - Show callback for CSF
5813 * idle_hysteresis_time_ns sysfs entry.
5814 *
5815 * @dev: The device this sysfs file is for.
5816 * @attr: The attributes of the sysfs file.
5817 * @buf: The output buffer to receive the GPU information.
5818 *
5819 * This function is called to get the current idle hysteresis duration in ns.
5820 *
5821 * Return: The number of bytes output to @buf.
5822 */
5823static ssize_t idle_hysteresis_time_ns_show(struct device *dev, struct device_attribute *attr,
5824 char *const buf)
5825{
5826 struct kbase_device *kbdev;
5827 ssize_t ret;
5828 u32 dur;
5829
5830 kbdev = to_kbase_device(dev);
5831 if (!kbdev)
5832 return -ENODEV;
5833
5834 dur = kbase_csf_firmware_get_gpu_idle_hysteresis_time(kbdev);
5835 ret = scnprintf(buf, PAGE_SIZE, "%u\n", dur);
5836
5837 return ret;
5838}
5839
5840static DEVICE_ATTR_RW(idle_hysteresis_time_ns);
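
/*
 * idle_hysteresis_time_ns is the nanosecond-granularity counterpart of
 * idle_hysteresis_time above; both update the same firmware setting. Sketch
 * (path abbreviated):
 *
 *   echo 10000000 > /sys/devices/platform/.../idle_hysteresis_time_ns   # 10 ms
 */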
5841
5842/**
Jack Divere19249e2022-11-07 12:13:47 +00005843 * mcu_shader_pwroff_timeout_show - Get the MCU shader Core power-off time value.
5844 *
5845 * @dev: The device this sysfs file is for.
5846 * @attr: The attributes of the sysfs file.
5847 * @buf: The output buffer for the sysfs file contents
5848 *
5849 * Get the internally recorded MCU shader Core power-off (nominal) timeout value.
5850 * The unit of the value is in micro-seconds.
5851 *
5852 * Return: The number of bytes output to @buf if the
5853 * function succeeded, or a negative value on failure.
5854 */
5855static ssize_t mcu_shader_pwroff_timeout_show(struct device *dev, struct device_attribute *attr,
5856 char *const buf)
5857{
5858 struct kbase_device *kbdev = dev_get_drvdata(dev);
5859 u32 pwroff;
5860
5861 if (!kbdev)
5862 return -ENODEV;
5863
Jörg Wagnere61eb932023-08-31 17:27:24 +00005864	/* The value reported via sysfs is in microseconds, so divide the nanosecond value by NSEC_PER_USEC. */
5865 pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev) / NSEC_PER_USEC;
Jack Divere19249e2022-11-07 12:13:47 +00005866 return scnprintf(buf, PAGE_SIZE, "%u\n", pwroff);
5867}
5868
5869/**
5870 * mcu_shader_pwroff_timeout_store - Set the MCU shader core power-off time value.
5871 *
5872 * @dev: The device this sysfs file is for
5873 * @attr: The attributes of the sysfs file
5874 * @buf: The value written to the sysfs file
5875 * @count: The number of bytes to write to the sysfs file
5876 *
5877 * The duration value (unit: micro-seconds) for configuring MCU Shader Core
5878 * timer, when the shader cores' power transitions are delegated to the
5879 * MCU (normal operational mode)
5880 *
5881 * Return: @count if the function succeeded. An error code on failure.
5882 */
5883static ssize_t mcu_shader_pwroff_timeout_store(struct device *dev, struct device_attribute *attr,
5884 const char *buf, size_t count)
5885{
5886 struct kbase_device *kbdev = dev_get_drvdata(dev);
5887 u32 dur;
5888
Jörg Wagnerdacf0042023-08-01 13:38:22 +00005889 const struct kbase_pm_policy *current_policy;
5890 bool always_on;
5891
Jack Divere19249e2022-11-07 12:13:47 +00005892 if (!kbdev)
5893 return -ENODEV;
5894
5895 if (kstrtouint(buf, 0, &dur))
5896 return -EINVAL;
5897
Jörg Wagnerdacf0042023-08-01 13:38:22 +00005898 current_policy = kbase_pm_get_policy(kbdev);
5899 always_on = current_policy == &kbase_pm_always_on_policy_ops;
5900 if (dur == 0 && !always_on)
5901 return -EINVAL;
5902
Jörg Wagnere61eb932023-08-31 17:27:24 +00005903	/* The value written via sysfs is in microseconds, but the firmware
5904	 * interface takes nanoseconds, so multiply by NSEC_PER_USEC.
5905	 */
5906 kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur * NSEC_PER_USEC);
Jack Diverc3053352022-09-02 11:38:04 +00005907
5908 return count;
5909}
5910
5911static DEVICE_ATTR_RW(mcu_shader_pwroff_timeout);
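
/*
 * Usage sketch for the mcu_shader_pwroff_timeout sysfs file (path
 * abbreviated). The value is in microseconds; 0 is only accepted while the
 * always_on power policy is selected:
 *
 *   echo 1000 > /sys/devices/platform/.../mcu_shader_pwroff_timeout
 */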
5912
Jörg Wagnere61eb932023-08-31 17:27:24 +00005913/**
5914 * mcu_shader_pwroff_timeout_ns_show - Get the MCU shader Core power-off time value.
5915 *
5916 * @dev: The device this sysfs file is for.
5917 * @attr: The attributes of the sysfs file.
5918 * @buf: The output buffer for the sysfs file contents
5919 *
5920 * Get the internally recorded MCU shader Core power-off (nominal) timeout value.
5921 * The unit of the value is in nanoseconds.
5922 *
5923 * Return: The number of bytes output to @buf if the
5924 * function succeeded, or a negative value on failure.
5925 */
5926static ssize_t mcu_shader_pwroff_timeout_ns_show(struct device *dev, struct device_attribute *attr,
5927 char *const buf)
5928{
5929 struct kbase_device *kbdev = dev_get_drvdata(dev);
5930 u32 pwroff;
5931
5932 if (!kbdev)
5933 return -ENODEV;
5934
5935 pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev);
5936 return scnprintf(buf, PAGE_SIZE, "%u\n", pwroff);
5937}
5938
5939/**
5940 * mcu_shader_pwroff_timeout_ns_store - Set the MCU shader core power-off time value.
5941 *
5942 * @dev: The device this sysfs file is for
5943 * @attr: The attributes of the sysfs file
5944 * @buf: The value written to the sysfs file
5945 * @count: The number of bytes to write to the sysfs file
5946 *
5947 * The duration value (unit: nanoseconds) for configuring MCU Shader Core
5948 * timer, when the shader cores' power transitions are delegated to the
5949 * MCU (normal operational mode)
5950 *
5951 * Return: @count if the function succeeded. An error code on failure.
5952 */
5953static ssize_t mcu_shader_pwroff_timeout_ns_store(struct device *dev, struct device_attribute *attr,
5954 const char *buf, size_t count)
5955{
5956 struct kbase_device *kbdev = dev_get_drvdata(dev);
5957 u32 dur;
5958
5959 const struct kbase_pm_policy *current_policy;
5960 bool always_on;
5961
5962 if (!kbdev)
5963 return -ENODEV;
5964
5965 if (kstrtouint(buf, 0, &dur))
5966 return -EINVAL;
5967
5968 current_policy = kbase_pm_get_policy(kbdev);
5969 always_on = current_policy == &kbase_pm_always_on_policy_ops;
5970 if (dur == 0 && !always_on)
5971 return -EINVAL;
5972
Jack Divere19249e2022-11-07 12:13:47 +00005973 kbase_csf_firmware_set_mcu_core_pwroff_time(kbdev, dur);
5974
5975 return count;
5976}
5977
Jörg Wagnere61eb932023-08-31 17:27:24 +00005978static DEVICE_ATTR_RW(mcu_shader_pwroff_timeout_ns);
Jack Divere19249e2022-11-07 12:13:47 +00005979
Sidath Senanayake97483052021-01-29 15:03:53 +00005980#endif /* MALI_USE_CSF */
5981
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01005982static struct attribute *kbase_scheduling_attrs[] = {
Sidath Senanayake72f24572020-10-27 11:38:49 +00005983#if !MALI_USE_CSF
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01005984 &dev_attr_serialize_jobs.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00005985#endif /* !MALI_USE_CSF */
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01005986 NULL
5987};
5988
Ankit Goyalbb5eae02021-07-22 17:43:56 +08005989static ssize_t total_gpu_mem_show(
5990 struct device *dev,
5991 struct device_attribute *attr,
5992 char *const buf)
5993{
5994 struct kbase_device *kbdev;
5995 kbdev = to_kbase_device(dev);
5996 if (!kbdev)
5997 return -ENODEV;
5998
5999 return sysfs_emit(buf, "%lu\n",
6000 (unsigned long) kbdev->total_gpu_pages << PAGE_SHIFT);
6001}
6002static DEVICE_ATTR_RO(total_gpu_mem);
6003
6004static ssize_t dma_buf_gpu_mem_show(
6005 struct device *dev,
6006 struct device_attribute *attr,
6007 char *const buf)
6008{
6009 struct kbase_device *kbdev;
6010 kbdev = to_kbase_device(dev);
6011 if (!kbdev)
6012 return -ENODEV;
6013
6014 return sysfs_emit(buf, "%lu\n",
6015 (unsigned long) kbdev->dma_buf_pages << PAGE_SHIFT);
6016}
6017static DEVICE_ATTR_RO(dma_buf_gpu_mem);
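
/*
 * Usage sketch for the read-only GPU memory statistics above (path
 * abbreviated). Both values are reported in bytes:
 *
 *   cat /sys/devices/platform/.../total_gpu_mem
 *   cat /sys/devices/platform/.../dma_buf_gpu_mem
 */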
6018
Sidath Senanayake823a7602016-06-29 16:03:55 +02006019static struct attribute *kbase_attrs[] = {
6020#ifdef CONFIG_MALI_DEBUG
6021 &dev_attr_debug_command.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006022#if !MALI_USE_CSF
Sidath Senanayake823a7602016-06-29 16:03:55 +02006023 &dev_attr_js_softstop_always.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006024#endif /* !MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02006025#endif
Sidath Senanayake72f24572020-10-27 11:38:49 +00006026#if !MALI_USE_CSF
Sidath Senanayake823a7602016-06-29 16:03:55 +02006027 &dev_attr_js_timeouts.attr,
Sidath Senanayake192bd792016-11-09 14:14:45 +01006028 &dev_attr_soft_job_timeout.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006029#endif /* !MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02006030 &dev_attr_gpuinfo.attr,
6031 &dev_attr_dvfs_period.attr,
6032 &dev_attr_pm_poweroff.attr,
6033 &dev_attr_reset_timeout.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006034#if !MALI_USE_CSF
Sidath Senanayake823a7602016-06-29 16:03:55 +02006035 &dev_attr_js_scheduling_period.attr,
Sidath Senanayake97483052021-01-29 15:03:53 +00006036#else
6037 &dev_attr_csg_scheduling_period.attr,
6038 &dev_attr_fw_timeout.attr,
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08006039 &dev_attr_idle_hysteresis_time.attr,
Jörg Wagnere61eb932023-08-31 17:27:24 +00006040 &dev_attr_idle_hysteresis_time_ns.attr,
Jack Divere19249e2022-11-07 12:13:47 +00006041 &dev_attr_mcu_shader_pwroff_timeout.attr,
Jörg Wagnere61eb932023-08-31 17:27:24 +00006042 &dev_attr_mcu_shader_pwroff_timeout_ns.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006043#endif /* !MALI_USE_CSF */
Sidath Senanayake823a7602016-06-29 16:03:55 +02006044 &dev_attr_power_policy.attr,
Sidath Senanayake823a7602016-06-29 16:03:55 +02006045 &dev_attr_core_mask.attr,
6046 &dev_attr_mem_pool_size.attr,
6047 &dev_attr_mem_pool_max_size.attr,
Sidath Senanayakec19c6272017-09-19 18:23:58 +02006048 &dev_attr_lp_mem_pool_size.attr,
6049 &dev_attr_lp_mem_pool_max_size.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006050#if !MALI_USE_CSF
Sidath Senanayake8946bcd2018-03-19 13:26:23 +01006051 &dev_attr_js_ctx_scheduling_mode.attr,
Sidath Senanayake72f24572020-10-27 11:38:49 +00006052#endif /* !MALI_USE_CSF */
Ankit Goyalbb5eae02021-07-22 17:43:56 +08006053 &dev_attr_total_gpu_mem.attr,
6054 &dev_attr_dma_buf_gpu_mem.attr,
Sidath Senanayake823a7602016-06-29 16:03:55 +02006055 NULL
6056};
6057
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00006058static struct attribute *kbase_mempool_attrs[] = {
6059 &dev_attr_max_size.attr,
6060 &dev_attr_lp_max_size.attr,
6061 &dev_attr_ctx_default_max_size.attr,
6062 NULL
6063};
6064
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01006065#define SYSFS_SCHEDULING_GROUP "scheduling"
6066static const struct attribute_group kbase_scheduling_attr_group = {
6067 .name = SYSFS_SCHEDULING_GROUP,
6068 .attrs = kbase_scheduling_attrs,
6069};
6070
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00006071#define SYSFS_MEMPOOL_GROUP "mempool"
6072static const struct attribute_group kbase_mempool_attr_group = {
6073 .name = SYSFS_MEMPOOL_GROUP,
6074 .attrs = kbase_mempool_attrs,
6075};
6076
Sidath Senanayake823a7602016-06-29 16:03:55 +02006077static const struct attribute_group kbase_attr_group = {
6078 .attrs = kbase_attrs,
6079};
6080
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006081int kbase_sysfs_init(struct kbase_device *kbdev)
6082{
6083 int err = 0;
6084
6085 kbdev->mdev.minor = MISC_DYNAMIC_MINOR;
6086 kbdev->mdev.name = kbdev->devname;
6087 kbdev->mdev.fops = &kbase_fops;
6088 kbdev->mdev.parent = get_device(kbdev->dev);
6089 kbdev->mdev.mode = 0666;
6090
6091 err = sysfs_create_group(&kbdev->dev->kobj, &kbase_attr_group);
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00006092 if (err)
6093 return err;
6094
6095 err = sysfs_create_group(&kbdev->dev->kobj,
6096 &kbase_scheduling_attr_group);
6097 if (err) {
6098 dev_err(kbdev->dev, "Creation of %s sysfs group failed",
6099 SYSFS_SCHEDULING_GROUP);
6100 sysfs_remove_group(&kbdev->dev->kobj,
6101 &kbase_attr_group);
6102 return err;
6103 }
6104
6105 err = sysfs_create_group(&kbdev->dev->kobj,
6106 &kbase_mempool_attr_group);
6107 if (err) {
6108 dev_err(kbdev->dev, "Creation of %s sysfs group failed",
6109 SYSFS_MEMPOOL_GROUP);
6110 sysfs_remove_group(&kbdev->dev->kobj,
6111 &kbase_scheduling_attr_group);
6112 sysfs_remove_group(&kbdev->dev->kobj,
6113 &kbase_attr_group);
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01006114 }
6115
Ankit Goyalbb5eae02021-07-22 17:43:56 +08006116 kbdev->proc_sysfs_node = kobject_create_and_add("kprcs",
6117 &kbdev->dev->kobj);
6118
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006119 return err;
6120}
6121
6122void kbase_sysfs_term(struct kbase_device *kbdev)
6123{
Sidath Senanayake201c8bf2021-01-29 14:51:21 +00006124 sysfs_remove_group(&kbdev->dev->kobj, &kbase_mempool_attr_group);
Sidath Senanayaked4ca6eb2020-09-11 16:44:12 +01006125 sysfs_remove_group(&kbdev->dev->kobj, &kbase_scheduling_attr_group);
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006126 sysfs_remove_group(&kbdev->dev->kobj, &kbase_attr_group);
Ankit Goyalbb5eae02021-07-22 17:43:56 +08006127 kobject_del(kbdev->proc_sysfs_node);
6128 kobject_put(kbdev->proc_sysfs_node);
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006129 put_device(kbdev->dev);
6130}
6131
Sidath Senanayake823a7602016-06-29 16:03:55 +02006132static int kbase_platform_device_remove(struct platform_device *pdev)
6133{
6134 struct kbase_device *kbdev = to_kbase_device(&pdev->dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006135
6136 if (!kbdev)
6137 return -ENODEV;
6138
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006139 kbase_device_term(kbdev);
6140 dev_set_drvdata(kbdev->dev, NULL);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006141 kbase_device_free(kbdev);
6142
6143 return 0;
6144}
6145
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006146void kbase_backend_devfreq_term(struct kbase_device *kbdev)
6147{
6148#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006149 if (kbdev->devfreq)
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006150 kbase_devfreq_term(kbdev);
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006151#endif
6152}
6153
6154int kbase_backend_devfreq_init(struct kbase_device *kbdev)
6155{
6156#ifdef CONFIG_MALI_DEVFREQ
6157	/* Devfreq uses hardware counters, so it must be initialized after the hardware counter infrastructure. */
6158 int err = kbase_devfreq_init(kbdev);
6159
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006160 if (err)
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006161 dev_err(kbdev->dev, "Continuing without devfreq\n");
6162#endif /* CONFIG_MALI_DEVFREQ */
6163 return 0;
6164}
Sidath Senanayake92327782016-11-09 14:53:08 +01006165
Sidath Senanayake823a7602016-06-29 16:03:55 +02006166static int kbase_platform_device_probe(struct platform_device *pdev)
6167{
6168 struct kbase_device *kbdev;
Sidath Senanayake823a7602016-06-29 16:03:55 +02006169 int err = 0;
6170
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006171 mali_kbase_print_cs_experimental();
6172
Sidath Senanayake823a7602016-06-29 16:03:55 +02006173 kbdev = kbase_device_alloc();
6174 if (!kbdev) {
6175 dev_err(&pdev->dev, "Allocate device failed\n");
Sidath Senanayake823a7602016-06-29 16:03:55 +02006176 return -ENOMEM;
6177 }
6178
6179 kbdev->dev = &pdev->dev;
Debarshi Dutta20fff722023-06-02 13:36:22 +00006180
6181#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
6182 kbdev->token = -EPERM;
6183#endif /* (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE) */
6184
Sidath Senanayake823a7602016-06-29 16:03:55 +02006185 dev_set_drvdata(kbdev->dev, kbdev);
Jack Divere19249e2022-11-07 12:13:47 +00006186#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
6187 mutex_lock(&kbase_probe_mutex);
6188#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02006189 err = kbase_device_init(kbdev);
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02006190
Sidath Senanayake823a7602016-06-29 16:03:55 +02006191 if (err) {
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02006192 if (err == -EPROBE_DEFER)
Sidath Senanayakefca86132021-06-15 13:39:30 +01006193 dev_info(kbdev->dev,
6194 "Device initialization Deferred\n");
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02006195 else
6196 dev_err(kbdev->dev, "Device initialization failed\n");
6197
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006198 dev_set_drvdata(kbdev->dev, NULL);
6199 kbase_device_free(kbdev);
Jack Divere19249e2022-11-07 12:13:47 +00006200#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
6201 mutex_unlock(&kbase_probe_mutex);
6202#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02006203 } else {
Jörg Wagnerdacf0042023-08-01 13:38:22 +00006204#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
6205 /* Since upstream is not exporting mmap_min_addr, kbase at the
6206 * moment is unable to track possible kernel changes via sysfs.
6207 * Flag this out in a device info message.
6208 */
6209 dev_info(kbdev->dev, KBASE_COMPILED_MMAP_MIN_ADDR_MSG);
6210#endif
6211
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006212 dev_info(kbdev->dev,
Sidath Senanayake823a7602016-06-29 16:03:55 +02006213 "Probed as %s\n", dev_name(kbdev->mdev.this_device));
Sidath Senanayakeb64f5682020-04-14 14:55:25 +02006214 kbase_increment_device_id();
Jack Divere19249e2022-11-07 12:13:47 +00006215#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
6216 mutex_unlock(&kbase_probe_mutex);
6217#endif
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02006218#ifdef CONFIG_MALI_ARBITER_SUPPORT
Debarshi Dutta20fff722023-06-02 13:36:22 +00006219 rt_mutex_lock(&kbdev->pm.lock);
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02006220 kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_INITIALIZED_EVT);
Debarshi Dutta20fff722023-06-02 13:36:22 +00006221 rt_mutex_unlock(&kbdev->pm.lock);
Sidath Senanayakebc3c01e2020-06-18 09:26:13 +02006222#endif
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006223 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02006224
6225 return err;
6226}
6227
Sidath Senanayake92327782016-11-09 14:53:08 +01006228#undef KBASEP_DEFAULT_REGISTER_HISTORY_SIZE
6229
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006230/**
6231 * kbase_device_suspend - Suspend callback from the OS.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006232 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006233 * @dev: The device to suspend
Sidath Senanayake823a7602016-06-29 16:03:55 +02006234 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08006235 * This is called by Linux when the device should suspend.
6236 *
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08006237 * Return: A standard Linux error code on failure, 0 otherwise.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006238 */
6239static int kbase_device_suspend(struct device *dev)
6240{
6241 struct kbase_device *kbdev = to_kbase_device(dev);
6242
6243 if (!kbdev)
6244 return -ENODEV;
6245
Siddharth Kapoor0207d6c2022-01-07 19:09:01 +08006246 if (kbase_pm_suspend(kbdev)) {
6247 dev_warn(kbdev->dev, "Abort suspend as GPU suspension failed");
6248 return -EBUSY;
6249 }
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006250
Sidath Senanayakeadb6c042020-10-01 02:25:53 +01006251#ifdef CONFIG_MALI_MIDGARD_DVFS
6252 kbase_pm_metrics_stop(kbdev);
6253#endif
6254
Sidath Senanayake97483052021-01-29 15:03:53 +00006255#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006256 dev_dbg(dev, "Callback %s\n", __func__);
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006257 if (kbdev->devfreq) {
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006258 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
6259 flush_workqueue(kbdev->devfreq_queue.workq);
6260 }
Sidath Senanayake823a7602016-06-29 16:03:55 +02006261#endif
Sidath Senanayake823a7602016-06-29 16:03:55 +02006262 return 0;
6263}
6264
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006265/**
6266 * kbase_device_resume - Resume callback from the OS.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006267 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006268 * @dev: The device to resume
Sidath Senanayake823a7602016-06-29 16:03:55 +02006269 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08006270 * This is called by Linux when the device should resume from suspension.
6271 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006272 * Return: A standard Linux error code
Sidath Senanayake823a7602016-06-29 16:03:55 +02006273 */
6274static int kbase_device_resume(struct device *dev)
6275{
6276 struct kbase_device *kbdev = to_kbase_device(dev);
6277
6278 if (!kbdev)
6279 return -ENODEV;
6280
6281 kbase_pm_resume(kbdev);
6282
Sidath Senanayakeadb6c042020-10-01 02:25:53 +01006283#ifdef CONFIG_MALI_MIDGARD_DVFS
6284 kbase_pm_metrics_start(kbdev);
6285#endif
6286
Sidath Senanayake97483052021-01-29 15:03:53 +00006287#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006288 dev_dbg(dev, "Callback %s\n", __func__);
Jack Divere19249e2022-11-07 12:13:47 +00006289 if (kbdev->devfreq)
6290 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006291#endif
6292 return 0;
6293}
6294
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006295/**
6296 * kbase_device_runtime_suspend - Runtime suspend callback from the OS.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006297 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08006298 * @dev: The device to suspend
6299 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006300 * This is called by Linux when the device should prepare for a condition in
6301 * which it will not be able to communicate with the CPU(s) and RAM due to
6302 * power management.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006303 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006304 * Return: A standard Linux error code
Sidath Senanayake823a7602016-06-29 16:03:55 +02006305 */
6306#ifdef KBASE_PM_RUNTIME
6307static int kbase_device_runtime_suspend(struct device *dev)
6308{
6309 struct kbase_device *kbdev = to_kbase_device(dev);
Jesse Hall0c596dc2021-11-23 14:38:46 -08006310 int ret = 0;
Sidath Senanayake823a7602016-06-29 16:03:55 +02006311
6312 if (!kbdev)
6313 return -ENODEV;
6314
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006315 dev_dbg(dev, "Callback %s\n", __func__);
Jesse Hall0c596dc2021-11-23 14:38:46 -08006316 KBASE_KTRACE_ADD(kbdev, PM_RUNTIME_SUSPEND_CALLBACK, NULL, 0);
6317
6318#if MALI_USE_CSF
6319 ret = kbase_pm_handle_runtime_suspend(kbdev);
6320 if (ret)
6321 return ret;
6322#endif
Sidath Senanayake97483052021-01-29 15:03:53 +00006323
Sidath Senanayakeadb6c042020-10-01 02:25:53 +01006324#ifdef CONFIG_MALI_MIDGARD_DVFS
6325 kbase_pm_metrics_stop(kbdev);
6326#endif
6327
Sidath Senanayake97483052021-01-29 15:03:53 +00006328#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006329 if (kbdev->devfreq)
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006330 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_SUSPEND);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006331#endif
6332
6333 if (kbdev->pm.backend.callback_power_runtime_off) {
6334 kbdev->pm.backend.callback_power_runtime_off(kbdev);
6335 dev_dbg(dev, "runtime suspend\n");
6336 }
Jesse Hall0c596dc2021-11-23 14:38:46 -08006337 return ret;
Sidath Senanayake823a7602016-06-29 16:03:55 +02006338}
6339#endif /* KBASE_PM_RUNTIME */
6340
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006341/**
6342 * kbase_device_runtime_resume - Runtime resume callback from the OS.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006343 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006344 * @dev: The device to resume
Sidath Senanayake823a7602016-06-29 16:03:55 +02006345 *
Siddharth Kapoor88d7d982022-03-02 14:51:29 +08006346 * This is called by Linux when the device should go into a fully active state.
6347 *
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006348 * Return: A standard Linux error code
Sidath Senanayake823a7602016-06-29 16:03:55 +02006349 */
6350
6351#ifdef KBASE_PM_RUNTIME
6352static int kbase_device_runtime_resume(struct device *dev)
6353{
6354 int ret = 0;
6355 struct kbase_device *kbdev = to_kbase_device(dev);
6356
6357 if (!kbdev)
6358 return -ENODEV;
6359
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006360 dev_dbg(dev, "Callback %s\n", __func__);
Jesse Hall0c596dc2021-11-23 14:38:46 -08006361 KBASE_KTRACE_ADD(kbdev, PM_RUNTIME_RESUME_CALLBACK, NULL, 0);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006362 if (kbdev->pm.backend.callback_power_runtime_on) {
6363 ret = kbdev->pm.backend.callback_power_runtime_on(kbdev);
6364 dev_dbg(dev, "runtime resume\n");
6365 }
6366
Sidath Senanayakeadb6c042020-10-01 02:25:53 +01006367#ifdef CONFIG_MALI_MIDGARD_DVFS
6368 kbase_pm_metrics_start(kbdev);
6369#endif
6370
Sidath Senanayake97483052021-01-29 15:03:53 +00006371#ifdef CONFIG_MALI_DEVFREQ
Sidath Senanayake7ed9a0b2020-02-03 12:15:52 +01006372 if (kbdev->devfreq)
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006373 kbase_devfreq_enqueue_work(kbdev, DEVFREQ_WORK_RESUME);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006374#endif
6375
6376 return ret;
6377}
6378#endif /* KBASE_PM_RUNTIME */
6379
6380
6381#ifdef KBASE_PM_RUNTIME
6382/**
6383 * kbase_device_runtime_idle - Runtime idle callback from the OS.
6384 * @dev: The device to check for idleness
6385 *
6386 * This is called by Linux when the device appears to be inactive and it might
6387 * be placed into a low power state.
6388 *
6389 * Return: 0 if device can be suspended, non-zero to avoid runtime autosuspend,
6390 * otherwise a standard Linux error code
6391 */
6392static int kbase_device_runtime_idle(struct device *dev)
6393{
6394 struct kbase_device *kbdev = to_kbase_device(dev);
6395
6396 if (!kbdev)
6397 return -ENODEV;
6398
Sidath Senanayakeac90f0d2019-05-06 12:21:44 +02006399 dev_dbg(dev, "Callback %s\n", __func__);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006400 /* Use platform specific implementation if it exists. */
6401 if (kbdev->pm.backend.callback_power_runtime_idle)
6402 return kbdev->pm.backend.callback_power_runtime_idle(kbdev);
6403
Sidath Senanayakee972f652019-04-10 14:37:00 +02006404 /* Just need to update the device's last busy mark. Kernel will respect
6405 * the autosuspend delay and so won't suspend the device immediately.
6406 */
6407 pm_runtime_mark_last_busy(kbdev->dev);
Sidath Senanayake823a7602016-06-29 16:03:55 +02006408 return 0;
6409}
6410#endif /* KBASE_PM_RUNTIME */
6411
Sidath Senanayake44e8be92017-01-24 10:48:35 +01006412/* The power management operations for the platform driver.
Sidath Senanayake823a7602016-06-29 16:03:55 +02006413 */
6414static const struct dev_pm_ops kbase_pm_ops = {
6415 .suspend = kbase_device_suspend,
6416 .resume = kbase_device_resume,
6417#ifdef KBASE_PM_RUNTIME
6418 .runtime_suspend = kbase_device_runtime_suspend,
6419 .runtime_resume = kbase_device_runtime_resume,
6420 .runtime_idle = kbase_device_runtime_idle,
6421#endif /* KBASE_PM_RUNTIME */
6422};
6423
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01006424#if IS_ENABLED(CONFIG_OF)
Debarshi Dutta20fff722023-06-02 13:36:22 +00006425static const struct of_device_id kbase_dt_ids[] = { { .compatible = "arm,malit6xx" },
6426 { .compatible = "arm,mali-midgard" },
6427 { .compatible = "arm,mali-bifrost" },
6428 { .compatible = "arm,mali-valhall" },
6429 { /* sentinel */ } };
Sidath Senanayake823a7602016-06-29 16:03:55 +02006430MODULE_DEVICE_TABLE(of, kbase_dt_ids);
6431#endif
6432
6433static struct platform_driver kbase_platform_driver = {
6434 .probe = kbase_platform_device_probe,
6435 .remove = kbase_platform_device_remove,
6436 .driver = {
Jörg Wagnerdacf0042023-08-01 13:38:22 +00006437 .name = KBASE_DRV_NAME,
Sidath Senanayake823a7602016-06-29 16:03:55 +02006438 .pm = &kbase_pm_ops,
6439 .of_match_table = of_match_ptr(kbase_dt_ids),
Sidath Senanayake2bfaaa52021-06-17 17:58:22 +01006440 .probe_type = PROBE_PREFER_ASYNCHRONOUS,
Sidath Senanayake823a7602016-06-29 16:03:55 +02006441 },
6442};

#if (KERNEL_VERSION(5, 3, 0) > LINUX_VERSION_CODE) && IS_ENABLED(CONFIG_OF)
module_platform_driver(kbase_platform_driver);
#else
static int __init kbase_driver_init(void)
{
	int ret;

#if (KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE)
	mutex_init(&kbase_probe_mutex);
#endif

#ifndef CONFIG_OF
	ret = kbase_platform_register();
	if (ret)
		return ret;
#endif
	ret = platform_driver_register(&kbase_platform_driver);
#ifndef CONFIG_OF
	if (ret) {
		kbase_platform_unregister();
		return ret;
	}
#endif

	return ret;
}

static void __exit kbase_driver_exit(void)
{
	platform_driver_unregister(&kbase_platform_driver);
#ifndef CONFIG_OF
	kbase_platform_unregister();
#endif
}

module_init(kbase_driver_init);
module_exit(kbase_driver_exit);
#endif
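/*
 * For reference: on kernels that take the module_platform_driver() branch
 * above, the macro generates boilerplate roughly equivalent to the following
 * simplified sketch, i.e. the same registration as kbase_driver_init() but
 * without the !CONFIG_OF fallback or the probe mutex initialisation.
 *
 *	static int __init kbase_platform_driver_init(void)
 *	{
 *		return platform_driver_register(&kbase_platform_driver);
 *	}
 *	module_init(kbase_platform_driver_init);
 *
 *	static void __exit kbase_platform_driver_exit(void)
 *	{
 *		platform_driver_unregister(&kbase_platform_driver);
 *	}
 *	module_exit(kbase_platform_driver_exit);
 */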
MODULE_LICENSE("GPL");
MODULE_VERSION(MALI_RELEASE_NAME " (UK version " \
		__stringify(BASE_UK_VERSION_MAJOR) "." \
		__stringify(BASE_UK_VERSION_MINOR) ")");
MODULE_SOFTDEP("pre: mali_pixel");
MODULE_SOFTDEP("pre: exynos-pd-dbg");
MODULE_INFO(import_ns, "DMA_BUF");
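/*
 * For reference: with hypothetical values BASE_UK_VERSION_MAJOR == 11 and
 * BASE_UK_VERSION_MINOR == 40 (illustrative only, not the real numbers), the
 * MODULE_VERSION() string above would expand to something of the form
 * "<MALI_RELEASE_NAME> (UK version 11.40)", which is what module metadata
 * tools report for the built kernel module.
 */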

#define CREATE_TRACE_POINTS
/* Create the trace points (otherwise we just get code to call a tracepoint) */
#include "mali_linux_trace.h"

#ifdef CONFIG_MALI_GATOR_SUPPORT
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_job_slots_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_pm_status);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_page_fault_insert_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(mali_total_alloc_pages_change);

void kbase_trace_mali_pm_status(u32 dev_id, u32 event, u64 value)
{
	trace_mali_pm_status(dev_id, event, value);
}

void kbase_trace_mali_job_slots_event(u32 dev_id, u32 event, const struct kbase_context *kctx, u8 atom_id)
{
	trace_mali_job_slots_event(dev_id, event,
				   (kctx != NULL ? kctx->tgid : 0),
				   (kctx != NULL ? kctx->pid : 0),
				   atom_id);
}

void kbase_trace_mali_page_fault_insert_pages(u32 dev_id, int event, u32 value)
{
	trace_mali_page_fault_insert_pages(dev_id, event, value);
}

void kbase_trace_mali_total_alloc_pages_change(u32 dev_id, long long event)
{
	trace_mali_total_alloc_pages_change(dev_id, event);
}
#endif /* CONFIG_MALI_GATOR_SUPPORT */
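/*
 * Illustrative sketch: the EXPORT_TRACEPOINT_SYMBOL_GPL() statements above
 * allow a GPL consumer such as gator to attach its own probe to these
 * tracepoints. Assuming mali_linux_trace.h declares mali_pm_status with the
 * same argument types as the wrapper above (u32 dev_id, u32 event, u64
 * value), a consumer module could do something like the following; the probe
 * function name is hypothetical.
 *
 *	static void example_pm_status_probe(void *data, u32 dev_id, u32 event,
 *					    u64 value)
 *	{
 *		pr_debug("mali_pm_status: dev %u event %u value %llu\n",
 *			 dev_id, event, value);
 *	}
 *
 *	In the consumer's init:
 *		register_trace_mali_pm_status(example_pm_status_probe, NULL);
 *	and on exit:
 *		unregister_trace_mali_pm_status(example_pm_status_probe, NULL);
 */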