blob: 646587ab45da07949cad1c6bc1e0362d43d5b865 [file] [log] [blame]
Abhishek Rastogi9da47472014-03-18 19:46:15 +05301/*
2 **************************************************************************
Cemil Coskunff83d562019-07-30 11:05:37 -07003 * Copyright (c) 2013, 2015-2019 The Linux Foundation. All rights reserved.
Abhishek Rastogi9da47472014-03-18 19:46:15 +05304 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17/*
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -070018 * nss_freq.c
Abhishek Rastogi9da47472014-03-18 19:46:15 +053019 * NSS frequency change APIs
20 */
21
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -070022#include "nss_stats.h"
Abhishek Rastogi9da47472014-03-18 19:46:15 +053023#include "nss_tx_rx_common.h"
Sachin Shashidhar6dd9cfc2018-07-31 14:44:37 -070024#include "nss_freq_log.h"
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -070025#include "nss_freq_stats.h"
Abhishek Rastogi9da47472014-03-18 19:46:15 +053026
27#define NSS_ACK_STARTED 0
28#define NSS_ACK_FINISHED 1
29
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -070030#define NSS_FREQ_USG_AVG_FREQUENCY 1000 /* Time in ms over which CPU Usage is averaged */
31#define NSS_FREQ_CPU_USAGE_MAX_BOUND 75 /* MAX CPU usage equivalent to running max instructions excluding all the hazards */
32#define NSS_FREQ_CPU_USAGE_MAX 100 /* MAX CPU usage equivalent to running max instructions including all the hazards.
33 This is also the ideal maximum usage value. */
34
35/*
36 * Spinlock to protect the global data structure nss_freq_cpu_status
37 */
38DEFINE_SPINLOCK(nss_freq_cpu_usage_lock);
39
40/*
41 * At any point, this object has the latest data about CPU utilization.
42 */
43struct nss_freq_cpu_usage nss_freq_cpu_status;
44
Abhishek Rastogi9da47472014-03-18 19:46:15 +053045extern struct nss_runtime_sampling nss_runtime_samples;
46extern struct workqueue_struct *nss_wq;
47extern nss_work_t *nss_work;
Abhishek Rastogi9da47472014-03-18 19:46:15 +053048
49/*
Sundarajan Srinivasan02e6c2b2014-10-06 11:51:12 -070050 * nss_freq_msg_init()
Thomas Wu0e2fc4f2015-03-04 15:39:14 -080051 * Initialize the freq message
Sundarajan Srinivasan02e6c2b2014-10-06 11:51:12 -070052 */
static void nss_freq_msg_init(struct nss_corefreq_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len,
			void *cb, void *app_data)
{
	/*
	 * Thin wrapper: only the embedded common message header needs initializing;
	 * the type-specific payload is filled in by the caller afterwards.
	 */
	nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
}
58
59/*
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -070060 * nss_freq_handle_ack()
Abhishek Rastogi9da47472014-03-18 19:46:15 +053061 * Handle the nss ack of frequency change.
62 */
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -070063static void nss_freq_handle_ack(struct nss_ctx_instance *nss_ctx, struct nss_freq_msg *nfa)
Abhishek Rastogi9da47472014-03-18 19:46:15 +053064{
Thomas Wu168ca262014-03-21 16:20:27 -070065 if (nfa->ack == NSS_ACK_STARTED) {
Abhishek Rastogi9da47472014-03-18 19:46:15 +053066 /*
67 * NSS finished start noficiation - HW change clocks and send end notification
68 */
Thomas Wu168ca262014-03-21 16:20:27 -070069 nss_info("%p: NSS ACK Received: %d - Change HW CLK/Send Finish to NSS\n", nss_ctx, nfa->ack);
Abhishek Rastogi9da47472014-03-18 19:46:15 +053070
71 return;
72 }
73
Thomas Wu168ca262014-03-21 16:20:27 -070074 if (nfa->ack == NSS_ACK_FINISHED) {
Abhishek Rastogi9da47472014-03-18 19:46:15 +053075 /*
76 * NSS finished end notification - Done
77 */
Thomas Wu168ca262014-03-21 16:20:27 -070078 nss_info("%p: NSS ACK Received: %d - End Notification ACK - Running: %dmhz\n", nss_ctx, nfa->ack, nfa->freq_current);
Abhishek Rastogi9da47472014-03-18 19:46:15 +053079 nss_runtime_samples.freq_scale_ready = 1;
80 return;
81 }
82
83 nss_info("%p: NSS had an error - Running: %dmhz\n", nss_ctx, nfa->freq_current);
84}
85
86/*
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -070087 * nss_freq_queue_work()
Abhishek Rastogi9da47472014-03-18 19:46:15 +053088 * Queue Work to the NSS Workqueue based on Current index.
89 */
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +053090static bool nss_freq_queue_work(void)
Abhishek Rastogi9da47472014-03-18 19:46:15 +053091{
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +053092 nss_freq_scales_t index = nss_runtime_samples.freq_scale_index;
93
Abhishek Rastogi9da47472014-03-18 19:46:15 +053094 BUG_ON(!nss_wq);
95
Thomas Wu0e2fc4f2015-03-04 15:39:14 -080096 nss_info("frequency:%d index:%d sample count:%x\n", nss_runtime_samples.freq_scale[index].frequency,
97 index, nss_runtime_samples.average);
Abhishek Rastogi9da47472014-03-18 19:46:15 +053098
Thomas Wu0e2fc4f2015-03-04 15:39:14 -080099 /*
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530100 * schedule freq change with autoscale ON
Thomas Wu0e2fc4f2015-03-04 15:39:14 -0800101 */
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +0530102 return nss_freq_sched_change(index, true);
Abhishek Rastogi9da47472014-03-18 19:46:15 +0530103}
104
105/*
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700106 * nss_freq_get_cpu_usage()
107 * Returns the CPU usage value in percentage at any instance for a required core. Returns -1 in case of an error.
108 *
109 * Calculation frequency is 1 second. Range of usage is 0-100. This API returns -1 if CPU usage is requested for core 1.
110 * TODO: Extend this API to get CPU usage for core 1.
Abhishek Rastogi9da47472014-03-18 19:46:15 +0530111 */
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700112int8_t nss_freq_get_cpu_usage(uint32_t core_id)
113{
114 int8_t usage;
115
116 if (core_id == 0) {
117 spin_lock_bh(&nss_freq_cpu_usage_lock);
118 usage = nss_freq_cpu_status.used;
119 spin_unlock_bh(&nss_freq_cpu_usage_lock);
120
121 return usage;
122 }
123
124 nss_warning("CPU usage functionality is not supported for core %u\n", core_id);
125 return -1;
126}
127
128/*
129 * nss_freq_compute_cpu_usage()
130 * Computes the CPU utilization and maximum-minimum CPU utilization since boot.
131 */
132static void nss_freq_compute_cpu_usage(struct nss_ctx_instance *nss_ctx, uint32_t inst_cnt)
133{
134 uint32_t estimated_ins_capacity;
135 uint8_t actual_usage;
136 uint8_t usage;
137
138 spin_lock_bh(&nss_freq_cpu_usage_lock);
139
140 /*
141 * If actual CPU usage turns up higher than 100, there is something wrong with the received data.
142 * Upper bound average varies between 80% usage to 100% usage.
143 *
144 * TODO: To improve estimation algorithm for calculating how many actual instructions are executed.
145 */
146 actual_usage = (inst_cnt * 100) / nss_freq_cpu_status.max_ins;
147 if ((actual_usage > NSS_FREQ_CPU_USAGE_MAX) || (actual_usage == 0)) {
148 spin_unlock_bh(&nss_freq_cpu_usage_lock);
149 return;
150 }
151
152 /*
153 * Simpler version of below math: This is calculating the reduced number of maximum instructions
154 * estimated_ins_capacity = nss_freq_cpu_status.avg_up% of nss_freq_cpu_status.max_ins
155 * Calculating usage percentage: usage = (inst_cnt/estimated_ins_capacity) * 100
156 */
157 estimated_ins_capacity = ((NSS_FREQ_CPU_USAGE_MAX_BOUND * nss_freq_cpu_status.max_ins) / 100);
158 if (estimated_ins_capacity == 0) {
159 spin_unlock_bh(&nss_freq_cpu_usage_lock);
160 return;
161 }
162 usage = (inst_cnt * 100) / estimated_ins_capacity;
163
164 /*
165 * Average the instructions over NSS_FREQ_USG_AVG_FREQUENCY ms
166 */
167 if (nss_freq_cpu_status.avg_ctr == NSS_FREQ_USG_AVG_FREQUENCY) {
168 nss_freq_cpu_status.used = nss_freq_cpu_status.total / NSS_FREQ_USG_AVG_FREQUENCY;
169
170 /*
171 * Due to our estimation, this could go beyond the end limit of 100%
172 */
173 if (nss_freq_cpu_status.used > NSS_FREQ_CPU_USAGE_MAX) {
174 nss_freq_cpu_status.used = NSS_FREQ_CPU_USAGE_MAX;
175 }
176
177 /*
178 * Getting the all time max and min usage
179 */
180 if (nss_freq_cpu_status.used > nss_freq_cpu_status.max) {
181 nss_freq_cpu_status.max = nss_freq_cpu_status.used;
182 }
183
184 if (nss_freq_cpu_status.used < nss_freq_cpu_status.min) {
185 nss_freq_cpu_status.min = nss_freq_cpu_status.used;
186 }
187
188 nss_trace("%p: max_instructions:%d cpu_usage:%d max_usage:%d min_usage:%d\n", nss_ctx,
189 nss_freq_cpu_status.max_ins, nss_freq_cpu_status.used, nss_freq_cpu_status.max, nss_freq_cpu_status.min);
190
191 nss_freq_cpu_status.total = 0;
192 nss_freq_cpu_status.avg_ctr = 0;
193 }
194
195 nss_freq_cpu_status.total += usage;
196 nss_freq_cpu_status.avg_ctr++;
197
198 spin_unlock_bh(&nss_freq_cpu_usage_lock);
199}
200
201/*
202 * nss_freq_scale_frequency()
203 * Frequency scaling algorithm to scale frequency.
204 */
void nss_freq_scale_frequency(struct nss_ctx_instance *nss_ctx, uint32_t inst_cnt)
{
	uint32_t b_index;
	uint32_t minimum;
	uint32_t maximum;
	uint32_t index = nss_runtime_samples.freq_scale_index;

	/*
	 * We do not accept any statistics if auto scaling is off,
	 * we start with a fresh sample set when scaling is
	 * eventually turned on.
	 */
	if (!nss_cmd_buf.auto_scale && nss_runtime_samples.initialized) {
		return;
	}

	/*
	 * Delete Current Index Value, Add New Value, Recalculate new Sum, Shift Index
	 * (ring buffer of the last NSS_SAMPLE_BUFFER_SIZE instruction-count samples;
	 * the running sum is maintained incrementally instead of re-summing).
	 */
	b_index = nss_runtime_samples.buffer_index;

	nss_runtime_samples.sum = nss_runtime_samples.sum - nss_runtime_samples.buffer[b_index];
	nss_runtime_samples.buffer[b_index] = inst_cnt;
	nss_runtime_samples.sum = nss_runtime_samples.sum + nss_runtime_samples.buffer[b_index];
	nss_runtime_samples.buffer_index = (b_index + 1) & NSS_SAMPLE_BUFFER_MASK;

	if (nss_runtime_samples.sample_count < NSS_SAMPLE_BUFFER_SIZE) {
		nss_runtime_samples.sample_count++;

		/*
		 * Samples Are All Ready, Start Auto Scale
		 */
		if (nss_runtime_samples.sample_count == NSS_SAMPLE_BUFFER_SIZE ) {
			nss_cmd_buf.auto_scale = 1;
			nss_runtime_samples.freq_scale_ready = 1;
			nss_runtime_samples.initialized = 1;
		}

		return;
	}

	nss_runtime_samples.average = nss_runtime_samples.sum / nss_runtime_samples.sample_count;

	/*
	 * Print out statistics every 10 samples
	 */
	if (nss_runtime_samples.message_rate_limit++ >= NSS_MESSAGE_RATE_LIMIT) {
		nss_trace("%p: Running AVG:%x Sample:%x Divider:%d\n", nss_ctx, nss_runtime_samples.average, inst_cnt, nss_runtime_samples.sample_count);
		nss_trace("%p: Current Frequency Index:%d\n", nss_ctx, index);
		nss_trace("%p: Auto Scale Ready:%d Auto Scale:%d\n", nss_ctx, nss_runtime_samples.freq_scale_ready, nss_cmd_buf.auto_scale);
		nss_trace("%p: Current Rate:%x\n", nss_ctx, nss_runtime_samples.average);

		nss_runtime_samples.message_rate_limit = 0;
	}

	/*
	 * Don't scale if we are not ready or auto scale is disabled.
	 * (freq_scale_ready is cleared while a change is in flight and set again
	 * by the NSS_ACK_FINISHED ack, so only one change is pending at a time.)
	 */
	if ((nss_runtime_samples.freq_scale_ready != 1) || (nss_cmd_buf.auto_scale != 1)) {
		return;
	}

	/*
	 * Scale Algorithm
	 *	Algorithm will limit how fast it will transition each scale, by the number of samples seen.
	 *	If any sample is out of scale during the idle count, the rate_limit will reset to 0.
	 *	Scales are limited to the max number of cpu scales we support.
	 */
	if (nss_runtime_samples.freq_scale_rate_limit_up++ >= NSS_FREQUENCY_SCALE_RATE_LIMIT_UP) {
		maximum = nss_runtime_samples.freq_scale[index].maximum;
		if ((nss_runtime_samples.average > maximum) && (index < (NSS_FREQ_MAX_SCALE - 1))) {
			nss_runtime_samples.freq_scale_index++;
			nss_runtime_samples.freq_scale_ready = 0;

			/*
			 * If fail to increase frequency, decrease index
			 */
			nss_trace("frequency increase to %d inst:%x > maximum:%x\n", nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency, inst_cnt, maximum);
			if (!nss_freq_queue_work()) {
				nss_runtime_samples.freq_scale_index--;
			}
		}

		/*
		 * Reset the down scale counter based on running average, so can idle properly
		 */
		if (nss_runtime_samples.average > maximum) {
			nss_trace("down scale timeout reset running average:%x\n", nss_runtime_samples.average);
			nss_runtime_samples.freq_scale_rate_limit_down = 0;
		}

		nss_runtime_samples.freq_scale_rate_limit_up = 0;
		return;
	}

	if (nss_runtime_samples.freq_scale_rate_limit_down++ >= NSS_FREQUENCY_SCALE_RATE_LIMIT_DOWN) {
		minimum = nss_runtime_samples.freq_scale[index].minimum;
		if ((nss_runtime_samples.average < minimum) && (index > 0)) {
			nss_runtime_samples.freq_scale_index--;
			nss_runtime_samples.freq_scale_ready = 0;

			/*
			 * If fail to decrease frequency, increase index
			 */
			nss_trace("frequency decrease to %d inst:%x < minumum:%x\n", nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency, nss_runtime_samples.average, minimum);
			if (!nss_freq_queue_work()) {
				nss_runtime_samples.freq_scale_index++;
			}
		}
		nss_runtime_samples.freq_scale_rate_limit_down = 0;
		return;
	}
}
318
319/*
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700320 * nss_freq_handle_core_stats()
321 * Handle the core stats.
322 */
323static void nss_freq_handle_core_stats(struct nss_ctx_instance *nss_ctx, struct nss_core_stats *core_stats)
324{
325 uint32_t inst_cnt = core_stats->inst_cnt_total;
326
327 /*
328 * compute CPU utilization by using the instruction count
329 */
330 nss_freq_compute_cpu_usage(nss_ctx, inst_cnt);
331
332 /*
333 * Perform frequency scaling
334 */
335 nss_freq_scale_frequency(nss_ctx, inst_cnt);
336}
337
338/*
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700339 * nss_freq_interface_handler()
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700340 * Handle NSS -> HLOS messages for Frequency Changes and Statistics.
Thomas Wuc07d8702014-03-19 15:46:19 -0700341 */
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700342static void nss_freq_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) {
Thomas Wuc07d8702014-03-19 15:46:19 -0700343
344 struct nss_corefreq_msg *ncfm = (struct nss_corefreq_msg *)ncm;
345
Sachin Shashidhar6dd9cfc2018-07-31 14:44:37 -0700346 /*
347 * Trace Messages
348 */
349 nss_freq_log_rx_msg(ncfm);
350
Thomas Wu168ca262014-03-21 16:20:27 -0700351 switch (ncfm->cm.type) {
Thomas Wuc07d8702014-03-19 15:46:19 -0700352 case COREFREQ_METADATA_TYPE_TX_FREQ_ACK:
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700353 nss_freq_handle_ack(nss_ctx, &ncfm->msg.nfc);
Thomas Wuc07d8702014-03-19 15:46:19 -0700354 break;
355 case COREFREQ_METADATA_TYPE_TX_CORE_STATS:
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700356 nss_freq_handle_core_stats(nss_ctx, &ncfm->msg.ncs);
Thomas Wuc07d8702014-03-19 15:46:19 -0700357 break;
358
359 default:
360 if (ncm->response != NSS_CMN_RESPONSE_ACK) {
361 /*
362 * Check response
363 */
Thomas Wu68250352014-04-02 18:59:40 -0700364 nss_info("%p: Received response %d for type %d, interface %d", nss_ctx, ncm->response, ncm->type, ncm->interface);
Thomas Wuc07d8702014-03-19 15:46:19 -0700365 }
366 }
367}
368
369/*
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530370 * nss_freq_change()
371 * NSS frequency change API.
372 */
373nss_tx_status_t nss_freq_change(struct nss_ctx_instance *nss_ctx, uint32_t eng, uint32_t stats_enable, uint32_t start_or_end)
374{
Stephen Wang3e2dbd12018-03-14 17:28:17 -0700375 struct nss_corefreq_msg ncm;
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530376 struct nss_freq_msg *nfc;
377
378 nss_info("%p: frequency changing to: %d\n", nss_ctx, eng);
379
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700380 /*
381 * Update the max instruction count for a frequency during down scaling.
382 * Better to update this as late as possible in the frequency update call.
383 */
384 spin_lock_bh(&nss_freq_cpu_usage_lock);
385 nss_freq_cpu_status.max_ins = eng / 1000;
386 spin_unlock_bh(&nss_freq_cpu_usage_lock);
387
Stephen Wang3e2dbd12018-03-14 17:28:17 -0700388 nss_freq_msg_init(&ncm, NSS_COREFREQ_INTERFACE, NSS_TX_METADATA_TYPE_NSS_FREQ_CHANGE,
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530389 sizeof(struct nss_freq_msg), NULL, NULL);
Stephen Wang3e2dbd12018-03-14 17:28:17 -0700390 nfc = &ncm.msg.nfc;
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530391 nfc->frequency = eng;
392 nfc->start_or_end = start_or_end;
393 nfc->stats_enable = stats_enable;
394
Stephen Wang3e2dbd12018-03-14 17:28:17 -0700395 return nss_core_send_cmd(nss_ctx, &ncm, sizeof(ncm), NSS_NBUF_PAYLOAD_SIZE);
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530396}
397
398/*
399 * nss_freq_sched_change()
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700400 * Schedule a frequency work.
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530401 */
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +0530402bool nss_freq_sched_change(nss_freq_scales_t index, bool auto_scale)
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530403{
404 if (index >= NSS_FREQ_MAX_SCALE) {
405 nss_info("NSS freq scale beyond limit\n");
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +0530406 return false;
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530407 }
408
409 nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC);
410 if (!nss_work) {
411 nss_info("NSS Freq WQ kmalloc fail");
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +0530412 return false;
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530413 }
414
Thomas Wud6af3772017-09-01 13:42:28 -0700415 INIT_WORK((struct work_struct *)nss_work, nss_hal_wq_function);
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530416
417 nss_work->frequency = nss_runtime_samples.freq_scale[index].frequency;
418
419 nss_work->stats_enable = auto_scale;
420 nss_cmd_buf.current_freq = nss_work->frequency;
421 queue_work(nss_wq, (struct work_struct *)nss_work);
Tanmay V Jagdalef6b2bce2017-03-03 14:31:07 +0530422
423 return true;
Samarjeet Banerjeeb126e0f2016-08-05 20:58:27 +0530424}
425
426/*
Thomas Wu91f4bdf2017-06-09 12:03:02 -0700427 * nss_freq_get_context()
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700428 * Get NSS context instance for frequency.
Thomas Wu91f4bdf2017-06-09 12:03:02 -0700429 */
struct nss_ctx_instance *nss_freq_get_context(void)
{
	/*
	 * The core that handles frequency messages is selected by
	 * frequency_handler_id in nss_top_main.
	 */
	return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.frequency_handler_id];
}
EXPORT_SYMBOL(nss_freq_get_context);
435
436/*
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700437 * nss_freq_register_handler()
Thomas Wuc07d8702014-03-19 15:46:19 -0700438 */
Sundarajan Srinivasandedd8e42014-10-06 11:59:34 -0700439void nss_freq_register_handler(void)
Thomas Wuc07d8702014-03-19 15:46:19 -0700440{
Thomas Wu91f4bdf2017-06-09 12:03:02 -0700441 struct nss_ctx_instance *nss_ctx = nss_freq_get_context();
Thomas Wu91f4bdf2017-06-09 12:03:02 -0700442 nss_core_register_handler(nss_ctx, NSS_COREFREQ_INTERFACE, nss_freq_interface_handler, NULL);
Thomas Wuc07d8702014-03-19 15:46:19 -0700443}
Gaurao Chaudhari7c5d01e2019-09-16 10:34:48 -0700444
445/*
446 * nss_freq_cpu_usage_init()
447 * Initialize cpu usage computing.
448 *
449 * TODO: Add support to retrieve CPU usage even if frequency scaling is disabled.
450 */
451void nss_freq_init_cpu_usage(void)
452{
453 nss_freq_cpu_status.used = 0;
454 nss_freq_cpu_status.max_ins = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency / 1000;
455 nss_freq_cpu_status.total = 0;
456 nss_freq_cpu_status.max = 0; /* Initial value is 0 to capture the highest most value during the run */
457 nss_freq_cpu_status.min = NSS_FREQ_CPU_USAGE_MAX; /* Initial value is 100 to capture the lowest most value during the run */
458 nss_freq_cpu_status.avg_up = NSS_FREQ_CPU_USAGE_MAX_BOUND;
459 nss_freq_cpu_status.avg_ctr = 0;
460
461 nss_freq_stats_dentry_create();
462}