Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Documentation/firmware_updater/request_firmware.txt | 22
-rw-r--r--  kernel/Documentation/firmware_updater/synaptics_fw_updater | bin 0 -> 666266 bytes
-rw-r--r--  kernel/Documentation/firmware_updater/synaptics_fw_updater.c | 753
-rw-r--r--  kernel/Documentation/firmware_updater/synaptics_fw_updater_readme.txt | 41
-rw-r--r--  kernel/arch/arm/configs/omap3_beagle_android_defconfig | 2419
-rw-r--r--  kernel/arch/arm/configs/panda_defconfig | 331
-rw-r--r--  kernel/arch/arm/mach-omap2/board-omap3beagle.c | 1038
-rw-r--r--  kernel/arch/arm/mach-omap2/board-omap4panda.c | 1053
-rw-r--r--  kernel/cgroup.c | 2
-rw-r--r--  kernel/cpu.c | 15
-rw-r--r--  kernel/cpu_pm.c | 31
-rw-r--r--  kernel/cpuset.c | 56
-rw-r--r--  kernel/drivers/input/touchscreen/Kconfig | 721
-rw-r--r--  kernel/drivers/input/touchscreen/Makefile | 68
-rw-r--r--  kernel/drivers/input/touchscreen/synaptics_fw_update.c | 1698
-rw-r--r--  kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.c | 2162
-rw-r--r--  kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.h | 286
-rw-r--r--  kernel/drivers/input/touchscreen/synaptics_rmi_dev.c | 710
-rw-r--r--  kernel/events/core.c | 151
-rw-r--r--  kernel/events/hw_breakpoint.c | 2
-rw-r--r--  kernel/exit.c | 19
-rw-r--r--  kernel/fork.c | 7
-rw-r--r--  kernel/include/linux/input/synaptics_dsx.h | 59
-rw-r--r--  kernel/irq/manage.c | 3
-rw-r--r--  kernel/locking/mutex.c | 12
-rw-r--r--  kernel/locking/spinlock_debug.c | 14
-rw-r--r--  kernel/module.c | 10
-rw-r--r--  kernel/panic.c | 8
-rw-r--r--  kernel/power/qos.c | 195
-rw-r--r--  kernel/printk/printk.c | 14
-rw-r--r--  kernel/resource.c | 2
-rw-r--r--  kernel/sched/Makefile | 3
-rw-r--r--  kernel/sched/clock.c | 2
-rw-r--r--  kernel/sched/core.c | 320
-rw-r--r--  kernel/sched/cputime.c | 13
-rw-r--r--  kernel/sched/deadline.c | 48
-rw-r--r--  kernel/sched/debug.c | 45
-rw-r--r--  kernel/sched/fair.c | 1678
-rw-r--r--  kernel/sched/features.h | 3
-rw-r--r--  kernel/sched/hmp.c | 4019
-rw-r--r--  kernel/sched/idle.c | 1
-rw-r--r--  kernel/sched/idle_task.c | 25
-rw-r--r--  kernel/sched/rt.c | 189
-rw-r--r--  kernel/sched/sched.h | 711
-rw-r--r--  kernel/sched/sched_avg.c | 128
-rw-r--r--  kernel/sched/stop_task.c | 42
-rw-r--r--  kernel/smp.c | 35
-rw-r--r--  kernel/smpboot.c | 3
-rw-r--r--  kernel/sysctl.c | 211
-rw-r--r--  kernel/sysctl_binary.c | 3
-rw-r--r--  kernel/time/Makefile | 2
-rw-r--r--  kernel/time/alarmtimer.c | 270
-rw-r--r--  kernel/time/sched_clock.c | 8
-rw-r--r--  kernel/time/tick-sched.c | 92
-rw-r--r--  kernel/time/timer.c | 64
-rw-r--r--  kernel/trace/Kconfig | 38
-rw-r--r--  kernel/trace/Makefile | 6
-rw-r--r--  kernel/trace/blktrace.c | 80
-rw-r--r--  kernel/trace/ipc_logging.c | 876
-rw-r--r--  kernel/trace/ipc_logging_debug.c | 184
-rw-r--r--  kernel/trace/ipc_logging_private.h | 165
-rw-r--r--  kernel/trace/msm_rtb.c | 329
-rw-r--r--  kernel/trace/power-traces.c | 3
-rw-r--r--  kernel/trace/trace.c | 13
-rw-r--r--  kernel/trace/trace_cpu_freq_switch.c | 312
-rw-r--r--  kernel/trace/trace_event_perf.c | 3
-rw-r--r--  kernel/trace/trace_events.c | 5
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 3
-rw-r--r--  kernel/trace/trace_syscalls.c | 4
-rw-r--r--  kernel/trace/trace_uprobe.c | 2
-rw-r--r--  kernel/watchdog.c | 20
-rw-r--r--  kernel/workqueue.c | 176
72 files changed, 21795 insertions, 241 deletions
diff --git a/kernel/Documentation/firmware_updater/request_firmware.txt b/kernel/Documentation/firmware_updater/request_firmware.txt
new file mode 100644
index 000000000000..317f04ac5684
--- /dev/null
+++ b/kernel/Documentation/firmware_updater/request_firmware.txt
@@ -0,0 +1,22 @@
+Firmware Update Function
+========================
+
+Call the exported function "synaptics_fw_updater" in rmi_fw_update.c to start
+the firmware update process in the driver.
+
+The RMI4 driver uses the kernel's request_firmware() feature to obtain
+firmware for the touch sensor. The firmware is expected to live in
+the file firmware/<firmware_name>.img.ihex.
+
+To prepare the Synaptics-provided .img file for reflashing, convert it to .ihex
+format using the following command:
+
+ objcopy -I binary -O ihex <firmware_name>.img firmware/<firmware_name>.img.ihex
+
+Then make sure to add the image file name to the
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI4_FW_UPDATE entry in firmware/Makefile.
+If you don't do this, the image file won't be included, and
+the firmware loader class will delay for 60 seconds waiting for a non-existent
+userspace response to the firmware load request.
+
+Firmware updates for multichip solutions (aka LTS) are not supported.
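
For context, the sketch below (not part of the patch) shows how a driver might consume such an image through the kernel's request_firmware() interface; the image name and the helper are illustrative placeholders, not symbols from this driver.

    #include <linux/device.h>
    #include <linux/firmware.h>

    /* Hypothetical helper: fetch the touch firmware image and hand it off. */
    static int example_load_touch_fw(struct device *dev)
    {
            const struct firmware *fw;
            int rc;

            /* Looks the image up among the blobs built into the kernel
             * (or asks userspace for it if it is not built in). */
            rc = request_firmware(&fw, "synaptics.img", dev);
            if (rc)
                    return rc;

            /* fw->data and fw->size now describe the image; pass them to
             * the reflash code, then release the firmware. */
            release_firmware(fw);
            return 0;
    }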
diff --git a/kernel/Documentation/firmware_updater/synaptics_fw_updater b/kernel/Documentation/firmware_updater/synaptics_fw_updater
new file mode 100644
index 000000000000..b0c1b4d9e770
--- /dev/null
+++ b/kernel/Documentation/firmware_updater/synaptics_fw_updater
Binary files differ
diff --git a/kernel/Documentation/firmware_updater/synaptics_fw_updater.c b/kernel/Documentation/firmware_updater/synaptics_fw_updater.c
new file mode 100644
index 000000000000..7409dd424109
--- /dev/null
+++ b/kernel/Documentation/firmware_updater/synaptics_fw_updater.c
@@ -0,0 +1,753 @@
+/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ *
+ * Copyright © 2011, 2012 Synaptics Incorporated. All rights reserved.
+ *
+ * The information in this file is confidential under the terms
+ * of a non-disclosure agreement with Synaptics and is provided
+ * AS IS without warranties or guarantees of any kind.
+ *
+ * The information in this file shall remain the exclusive property
+ * of Synaptics and may be the subject of Synaptics patents, in
+ * whole or part. Synaptics intellectual property rights in the
+ * information in this file are not expressly or implicitly licensed
+ * or otherwise transferred to you as a result of such information
+ * being made available to you.
+ *
+ * File: synaptics_fw_updater.c
+ *
+ * Description: command-line reflash implementation using command-
+ * line args. This file should not be OS dependent and should build and
+ * run under any Linux-based OS that utilizes the Synaptics rmi driver
+ * built into the kernel (kernel/drivers/input/rmi4).
+ *
+ * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ */
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/time.h>
+
+#define DEFAULT_SENSOR "/sys/class/input/input1"
+
+#define MAX_STRING_LEN 256
+#define MAX_INT_LEN 33
+
+#define DATA_FILENAME "data"
+#define IMAGESIZE_FILENAME "imagesize"
+#define DOREFLASH_FILENAME "doreflash"
+#define CONFIGAREA_FILENAME "configarea"
+#define READCONFIG_FILENAME "readconfig"
+#define WRITECONFIG_FILENAME "writeconfig"
+#define BLOCKSIZE_FILENAME "blocksize"
+#define IMAGEBLOCKCOUNT_FILENAME "fwblockcount"
+#define CONFIGBLOCKCOUNT_FILENAME "configblockcount"
+#define PMCONFIGBLOCKCOUNT_FILENAME "permconfigblockcount"
+#define BUILDID_FILENAME "buildid"
+#define FLASHPROG_FILENAME "flashprog"
+
+#define UI_CONFIG_AREA 0
+#define PERM_CONFIG_AREA 1
+#define BL_CONFIG_AREA 2
+#define DISP_CONFIG_AREA 3
+
+#define IMAGE_FILE_CHECKSUM_SIZE 4
+
+unsigned char *firmware = NULL;
+int fileSize;
+int firmwareBlockSize;
+int firmwareBlockCount;
+int firmwareImgSize;
+int configBlockSize;
+int configBlockCount;
+int configImgSize;
+int totalBlockCount;
+int readConfig = 0;
+int writeConfig = 0;
+int uiConfig = 0;
+int pmConfig = 0;
+int blConfig = 0;
+int dpConfig = 0;
+int force = 0;
+int verbose = 0;
+
+char mySensor[MAX_STRING_LEN];
+char imageFileName[MAX_STRING_LEN];
+
+static void usage(char *name)
+{
+ printf("Usage: %s [-b {image_file}] [-d {sysfs_entry}] [-r] [-ui] [-pm] [-bl] [-dp] [-f] [-v]\n", name);
+ printf("\t[-b {image_file}] - Name of image file\n");
+ printf("\t[-d {sysfs_entry}] - Path to sysfs entry of sensor\n");
+ printf("\t[-r] - Read config area\n");
+ printf("\t[-ui] - UI config area\n");
+ printf("\t[-pm] - Permanent config area\n");
+ printf("\t[-bl] - BL config area\n");
+ printf("\t[-dp] - Display config area\n");
+ printf("\t[-f] - Force reflash\n");
+ printf("\t[-v] - Verbose output\n");
+
+ return;
+}
+
+static void TimeSubtract(struct timeval *result, struct timeval *x, struct timeval *y)
+{
+ if (x->tv_usec < y->tv_usec) {
+ result->tv_sec = x->tv_sec - y->tv_sec - 1;
+ result->tv_usec = y->tv_usec - x->tv_usec;
+ } else {
+ result->tv_sec = x->tv_sec - y->tv_sec;
+ result->tv_usec = x->tv_usec - y->tv_usec;
+ }
+
+ return;
+}
+
+static int CheckSysfsEntry(char *sensorName)
+{
+ int retval;
+ struct stat st;
+
+ retval = stat(sensorName, &st);
+ if (retval)
+ printf("ERROR: sensor sysfs entry %s not found\n", sensorName);
+
+ return retval;
+}
+
+static void WriteBinData(char *fname, unsigned char *buf, int len)
+{
+ int numBytesWritten;
+ FILE *fp;
+
+ fp = fopen(fname, "wb");
+ if (!fp) {
+ printf("ERROR: failed to open %s for writing data\n", fname);
+ exit(EIO);
+ }
+
+ numBytesWritten = fwrite(buf, 1, len, fp);
+
+ if (numBytesWritten != len) {
+ printf("ERROR: failed to write all data to bin file\n");
+ fclose(fp);
+ exit(EIO);
+ }
+
+ fclose(fp);
+
+ return;
+}
+
+static void ReadBinData(char *fname, unsigned char *buf, int len)
+{
+ int numBytesRead;
+ FILE *fp;
+
+ fp = fopen(fname, "rb");
+ if (!fp) {
+ printf("ERROR: failed to open %s for reading data\n", fname);
+ exit(EIO);
+ }
+
+ numBytesRead = fread(buf, 1, len, fp);
+
+ if (numBytesRead != len) {
+ printf("ERROR: failed to read all data from bin file\n");
+ fclose(fp);
+ exit(EIO);
+ }
+
+ fclose(fp);
+
+ return;
+}
+
+static void WriteValueToFp(FILE *fp, unsigned int value)
+{
+ int numBytesWritten;
+ char buf[MAX_INT_LEN];
+
+ snprintf(buf, MAX_INT_LEN, "%u", value);
+
+ fseek(fp, 0, 0);
+
+ numBytesWritten = fwrite(buf, 1, strlen(buf) + 1, fp);
+ if (numBytesWritten != ((int)(strlen(buf) + 1))) {
+ printf("ERROR: failed to write value to file pointer\n");
+ fclose(fp);
+ exit(EIO);
+ }
+
+ return;
+}
+
+static void WriteValueToSysfsFile(char *fname, unsigned int value)
+{
+ FILE *fp;
+
+ fp = fopen(fname, "w");
+ if (!fp) {
+ printf("ERROR: failed to open %s for writing value\n", fname);
+ exit(EIO);
+ }
+
+ WriteValueToFp(fp, value);
+
+ fclose(fp);
+
+ return;
+}
+
+static void ReadValueFromFp(FILE *fp, unsigned int *value)
+{
+ int retVal;
+ char buf[MAX_INT_LEN];
+
+ fseek(fp, 0, 0);
+
+ retVal = fread(buf, 1, sizeof(buf), fp);
+	if (retVal <= 0) {
+ printf("ERROR: failed to read value from file pointer\n");
+ exit(EIO);
+ }
+
+ *value = strtoul(buf, NULL, 0);
+
+ return;
+}
+
+static void ReadValueFromSysfsFile(char *fname, unsigned int *value)
+{
+ FILE *fp;
+
+ fp = fopen(fname, "r");
+ if (!fp) {
+ printf("ERROR: failed to open %s for reading value\n", fname);
+ exit(EIO);
+ }
+
+ ReadValueFromFp(fp, value);
+
+ fclose(fp);
+
+ return;
+}
+
+static void WriteBlockData(char *buf, int len)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, DATA_FILENAME);
+
+ WriteBinData(tmpfname, (unsigned char *)buf, len);
+
+ return;
+}
+
+static void ReadBlockData(char *buf, int len)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, DATA_FILENAME);
+
+ ReadBinData(tmpfname, (unsigned char *)buf, len);
+
+ return;
+}
+
+static void SetImageSize(int value)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, IMAGESIZE_FILENAME);
+
+ WriteValueToSysfsFile(tmpfname, value);
+
+ return;
+}
+
+static void StartReflash(int value)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, DOREFLASH_FILENAME);
+
+ WriteValueToSysfsFile(tmpfname, value);
+
+ return;
+}
+
+static void SetConfigArea(int value)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, CONFIGAREA_FILENAME);
+
+ WriteValueToSysfsFile(tmpfname, value);
+
+ return;
+}
+
+static void StartWriteConfig(int value)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, WRITECONFIG_FILENAME);
+
+ WriteValueToSysfsFile(tmpfname, value);
+
+ return;
+}
+
+static void StartReadConfig(int value)
+{
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, READCONFIG_FILENAME);
+
+ WriteValueToSysfsFile(tmpfname, value);
+
+ return;
+}
+
+static int ReadBlockSize(void)
+{
+ unsigned int blockSize;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, BLOCKSIZE_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &blockSize);
+
+ return blockSize;
+}
+
+static int ReadFirmwareBlockCount(void)
+{
+ unsigned int imageBlockCount;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, IMAGEBLOCKCOUNT_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &imageBlockCount);
+
+ return imageBlockCount;
+}
+
+static int ReadConfigBlockCount(void)
+{
+ unsigned int configBlockCount;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, CONFIGBLOCKCOUNT_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &configBlockCount);
+
+ return configBlockCount;
+}
+
+static int ReadPmConfigBlockCount(void)
+{
+ unsigned int configBlockCount;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, PMCONFIGBLOCKCOUNT_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &configBlockCount);
+
+ return configBlockCount;
+}
+
+static int ReadBuildID(void)
+{
+ unsigned int buildID;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, BUILDID_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &buildID);
+
+ return buildID;
+}
+
+static int ReadFlashProg(void)
+{
+ unsigned int flashProg;
+ char tmpfname[MAX_STRING_LEN];
+
+ snprintf(tmpfname, MAX_STRING_LEN, "%s/%s", mySensor, FLASHPROG_FILENAME);
+
+ ReadValueFromSysfsFile(tmpfname, &flashProg);
+
+ return flashProg;
+}
+
+static void ReadFirmwareInfo(void)
+{
+ firmwareBlockSize = ReadBlockSize();
+ firmwareBlockCount = ReadFirmwareBlockCount();
+ firmwareImgSize = firmwareBlockCount * firmwareBlockSize;
+
+ return;
+}
+
+static void ReadConfigInfo(void)
+{
+ configBlockSize = ReadBlockSize();
+ configBlockCount = ReadConfigBlockCount();
+ configImgSize = configBlockSize * configBlockCount;
+
+ return;
+}
+
+static void CalculateChecksum(unsigned short *data, unsigned short len, unsigned long *result)
+{
+ unsigned long temp;
+ unsigned long sum1 = 0xffff;
+ unsigned long sum2 = 0xffff;
+
+ *result = 0xffffffff;
+
+ while (len--) {
+ temp = *data;
+ sum1 += temp;
+ sum2 += sum1;
+ sum1 = (sum1 & 0xffff) + (sum1 >> 16);
+ sum2 = (sum2 & 0xffff) + (sum2 >> 16);
+ data++;
+ }
+
+ *result = sum2 << 16 | sum1;
+
+ return;
+}
+
+static int CompareChecksum(void)
+{
+ unsigned long headerChecksum;
+ unsigned long computedChecksum;
+
+ headerChecksum = (unsigned long)firmware[0] +
+ (unsigned long)firmware[1] * 0x100 +
+ (unsigned long)firmware[2] * 0x10000 +
+ (unsigned long)firmware[3] * 0x1000000;
+
+ CalculateChecksum((unsigned short *)&firmware[IMAGE_FILE_CHECKSUM_SIZE],
+ ((fileSize - IMAGE_FILE_CHECKSUM_SIZE) / 2), &computedChecksum);
+
+ if (verbose) {
+ printf("Checksum in image file header = 0x%08x\n", (unsigned int)headerChecksum);
+ printf("Checksum computed from image file = 0x%08x\n", (unsigned int)computedChecksum);
+ }
+
+ if (headerChecksum == computedChecksum)
+ return 1;
+ else
+ return 0;
+}
+
+static int ProceedWithReflash(void)
+{
+ int index = 0;
+ int deviceBuildID;
+ int imageBuildID;
+ char imagePR[MAX_STRING_LEN];
+ char *strptr;
+
+ if (force) {
+ printf("Force reflash...\n");
+ return 1;
+ }
+
+ if (ReadFlashProg()) {
+ printf("Force reflash (device in flash prog mode)...\n");
+ return 1;
+ }
+
+ strptr = strstr(imageFileName, "PR");
+ if (!strptr) {
+ printf("No valid PR number (PRxxxxxxx) found in image file name...\n");
+ return 0;
+ }
+
+ strptr += 2;
+ while (strptr[index] >= '0' && strptr[index] <= '9') {
+ imagePR[index] = strptr[index];
+ index++;
+ }
+ imagePR[index] = 0;
+
+ imageBuildID = strtoul(imagePR, NULL, 0);
+ deviceBuildID = ReadBuildID();
+ printf("Image file PR = %d\n", imageBuildID);
+ printf("Device PR = %d\n", deviceBuildID);
+
+ if (imageBuildID > deviceBuildID) {
+ printf("Proceed with reflash...\n");
+ return 1;
+ } else {
+ printf("No need to do reflash...\n");
+ return 0;
+ }
+}
+
+static void DoReadConfig(void)
+{
+ int ii;
+ int jj;
+ int index = 0;
+ int configSize;
+ int blockCount;
+ unsigned char *buffer;
+
+ if (uiConfig) {
+ SetConfigArea(UI_CONFIG_AREA);
+ StartReadConfig(1);
+ blockCount = configBlockCount;
+ configSize = configImgSize;
+ buffer = malloc(configSize);
+ if (!buffer)
+ exit(ENOMEM);
+ ReadBlockData((char *)&buffer[0], configSize);
+ } else if (pmConfig) {
+ SetConfigArea(PERM_CONFIG_AREA);
+ StartReadConfig(1);
+ blockCount = ReadPmConfigBlockCount();
+ configSize = configBlockSize * blockCount;
+ buffer = malloc(configSize);
+ if (!buffer)
+ exit(ENOMEM);
+ ReadBlockData((char *)&buffer[0], configSize);
+ } else {
+ return;
+ }
+
+ for (ii = 0; ii < blockCount; ii++) {
+ for (jj = 0; jj < configBlockSize; jj++) {
+ printf("0x%02x ", buffer[index]);
+ index++;
+ }
+ printf("\n");
+ }
+
+ free(buffer);
+
+ return;
+}
+
+static void DoWriteConfig(void)
+{
+ printf("Starting config programming...\n");
+
+ if (uiConfig)
+ SetConfigArea(UI_CONFIG_AREA);
+ else if (pmConfig)
+ SetConfigArea(PERM_CONFIG_AREA);
+ else if (blConfig)
+ SetConfigArea(BL_CONFIG_AREA);
+ else if (dpConfig)
+ SetConfigArea(DISP_CONFIG_AREA);
+ else
+ return;
+
+ SetImageSize(fileSize);
+ WriteBlockData((char *)&firmware[0], fileSize);
+ StartWriteConfig(1);
+
+ printf("Config programming completed...\n");
+
+ return;
+}
+
+static void DoReflash(void)
+{
+ if (verbose)
+ printf("Blocks: %d (firmware: %d, config: %d)\n", totalBlockCount, firmwareBlockCount, configBlockCount);
+
+ if (!ProceedWithReflash())
+ return;
+
+ printf("Starting reflash...\n");
+
+ SetImageSize(fileSize);
+ WriteBlockData((char *)&firmware[0], fileSize);
+ StartReflash(1);
+
+ printf("Reflash completed...\n");
+
+ return;
+}
+
+static int InitFirmwareImage(void)
+{
+ int numBytesRead;
+ FILE *fp;
+
+ if (!readConfig) {
+ fp = fopen(imageFileName, "rb");
+
+ if (!fp) {
+ printf("ERROR: image file %s not found\n", imageFileName);
+ exit(ENODEV);
+ }
+
+ fseek(fp, 0L, SEEK_END);
+ fileSize = ftell(fp);
+ if (fileSize == -1) {
+ printf("ERROR: failed to determine size of %s\n", imageFileName);
+ exit(EIO);
+ }
+
+ fseek(fp, 0L, SEEK_SET);
+
+ firmware = malloc(fileSize + 1);
+ if (!firmware) {
+ exit(ENOMEM);
+ } else {
+ numBytesRead = fread(firmware, 1, fileSize, fp);
+ if (numBytesRead != fileSize) {
+ printf("ERROR: failed to read entire content of image file\n");
+ exit(EIO);
+ }
+ }
+
+ fclose(fp);
+
+ if (!(pmConfig || blConfig || dpConfig)) {
+ if (!CompareChecksum()) {
+ printf("ERROR: failed to validate checksum of image file\n");
+ exit(EINVAL);
+ }
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char* argv[])
+{
+ int retVal;
+ int this_arg = 1;
+ struct stat st;
+ struct timeval start_time;
+ struct timeval end_time;
+ struct timeval elapsed_time;
+
+ if (argc == 1) {
+ usage(argv[0]);
+ exit(EINVAL);
+ }
+
+ while (this_arg < argc) {
+ if (!strcmp((const char *)argv[this_arg], "-b")) {
+ /* Image file */
+ FILE *file;
+
+ this_arg++;
+ if (this_arg >= argc) {
+ printf("ERROR: image file missing\n");
+ exit(EINVAL);
+ }
+
+ /* check for presence of image file */
+ file = fopen(argv[this_arg], "rb");
+ if (file == 0) {
+ printf("ERROR: image file %s not found\n", argv[this_arg]);
+ exit(EINVAL);
+ }
+ fclose(file);
+
+ strncpy(imageFileName, argv[this_arg], MAX_STRING_LEN);
+ } else if (!strcmp((const char *)argv[this_arg], "-d")) {
+ /* path to sensor sysfs entry */
+ this_arg++;
+
+ if (stat(argv[this_arg], &st) == 0) {
+ strncpy(mySensor, argv[this_arg], MAX_STRING_LEN);
+ } else {
+ printf("ERROR: sensor sysfs entry %s not found\n", argv[this_arg]);
+ exit(EINVAL);
+ }
+ } else if (!strcmp((const char *)argv[this_arg], "-r")) {
+ readConfig = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-ui")) {
+ uiConfig = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-pm")) {
+ pmConfig = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-bl")) {
+ blConfig = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-dp")) {
+ dpConfig = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-f")) {
+ force = 1;
+ } else if (!strcmp((const char *)argv[this_arg], "-v")) {
+ verbose = 1;
+ } else {
+ usage(argv[0]);
+ printf("ERROR: invalid parameter %s supplied\n", argv[this_arg]);
+ exit(EINVAL);
+ }
+ this_arg++;
+ }
+
+ if ((uiConfig + pmConfig + blConfig + dpConfig) > 1) {
+ printf("ERROR: too many parameters\n");
+ exit(EINVAL);
+ }
+
+ if (uiConfig || pmConfig || blConfig || dpConfig)
+ writeConfig = 1;
+
+ if (!readConfig && !strlen(imageFileName)) {
+ printf("ERROR: no image file specified\n");
+ exit(EINVAL);
+ }
+
+ if (!strlen(mySensor))
+ strncpy(mySensor, DEFAULT_SENSOR, MAX_STRING_LEN);
+
+ if (CheckSysfsEntry(mySensor))
+ exit(ENODEV);
+
+ InitFirmwareImage();
+
+ ReadFirmwareInfo();
+ ReadConfigInfo();
+ totalBlockCount = configBlockCount + firmwareBlockCount;
+
+ retVal = gettimeofday(&start_time, NULL);
+ if (retVal)
+ printf("WARNING: failed to get start time\n");
+
+ if (verbose) {
+ if (!readConfig)
+ printf("Image file: %s\n", imageFileName);
+ printf("Sensor sysfs entry: %s\n", mySensor);
+ }
+
+ if (readConfig)
+ DoReadConfig();
+ else if (writeConfig)
+ DoWriteConfig();
+ else
+ DoReflash();
+
+ retVal = gettimeofday(&end_time, NULL);
+ if (retVal)
+ printf("WARNING: failed to get end time\n");
+
+ TimeSubtract(&elapsed_time, &end_time, &start_time);
+
+ if (verbose) {
+ printf("Elapsed time = %ld.%06ld seconds\n",
+ (long)elapsed_time.tv_sec,
+ (long)elapsed_time.tv_usec);
+ }
+
+ return 0;
+}
diff --git a/kernel/Documentation/firmware_updater/synaptics_fw_updater_readme.txt b/kernel/Documentation/firmware_updater/synaptics_fw_updater_readme.txt
new file mode 100644
index 000000000000..66f71922995a
--- /dev/null
+++ b/kernel/Documentation/firmware_updater/synaptics_fw_updater_readme.txt
@@ -0,0 +1,41 @@
+Use ADB (Android Debug Bridge) to perform a command-line reflash
+- Power on the device.
+- Connect the device to the host via USB.
+- Open a command prompt on the host and go to the directory where adb, synaptics_fw_updater, and the FW image (e.g. PR1234567.img) reside.
+- Run "adb devices" to ensure connection with the device.
+- Run "adb root" to gain root privileges.
+- Run "adb push synaptics_fw_updater /data" to copy synaptics_fw_updater to the /data directory on the device.
+- Run "adb push PR1234567.img /data" to copy the FW image to the /data directory on the device.
+- Run "adb shell chmod 777 /data/synaptics_fw_updater" to make synaptics_fw_updater executable.
+- Run "adb shell /data/synaptics_fw_updater -b /data/PR1234567.img -f -v" to start the reflash process.
+
+Parameters
+[-b {image_file}] - Name of image file
+[-d {sysfs_entry}] - Path to sysfs entry of sensor
+[-r] - Read config area
+[-ui] - UI config area
+[-pm] - Permanent config area
+[-bl] - BL config area
+[-dp] - Display config area
+[-f] - Force reflash
+[-v] - Verbose output
+
+Procedures for checking whether to proceed with reflash
+- If the [-f] flag is set, proceed with reflash
+- If the device is in flash prog (bootloader) mode, proceed with reflash
+- If the PR number in the name of the new FW image is greater than the PR number of the FW on the device, proceed with reflash
+- Otherwise, no reflash is performed
+
+Usage examples
+- Perform reflash using PR1234567.img regardless of PR number of FW on device
+ synaptics_fw_updater -b PR1234567.img -f
+- Perform reflash using PR1234567.img only if 1234567 is greater than the PR number of the FW on the device.
+ synaptics_fw_updater -b PR1234567.img
+- Write UI config area from PR1234567.img (parsing UI config area from firmware image file)
+ synaptics_fw_updater -b PR1234567.img -ui
+- Write permanent config area from pmconfig.img (binary file containing permanent config data)
+ synaptics_fw_updater -b pmconfig.img -pm
+- Read UI config area
+ synaptics_fw_updater -r -ui
+- Read permanent config area
+ synaptics_fw_updater -r -pm
\ No newline at end of file
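
The decision procedure above can be summarized as a small sketch (not part of the patch); force, in_flash_prog, image_pr, and device_pr stand in for the [-f] flag, the flashprog sysfs value, and the two PR numbers:

    /* Illustrative only: mirrors the reflash decision described above. */
    static int should_reflash(int force, int in_flash_prog,
                              unsigned long image_pr, unsigned long device_pr)
    {
            if (force)
                    return 1;             /* [-f] always reflashes */
            if (in_flash_prog)
                    return 1;             /* device stuck in bootloader mode */
            return image_pr > device_pr;  /* only flash a newer build */
    }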
diff --git a/kernel/arch/arm/configs/omap3_beagle_android_defconfig b/kernel/arch/arm/configs/omap3_beagle_android_defconfig
new file mode 100644
index 000000000000..4fc62c4fa440
--- /dev/null
+++ b/kernel/arch/arm/configs/omap3_beagle_android_defconfig
@@ -0,0 +1,2419 @@
+#
+# Automatically generated make config: don't edit
+# Linux/arm 2.6.37 Kernel Configuration
+# Mon Apr 16 13:58:06 2012
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_ARCH_USES_GETTIMEOFFSET is not set
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_HAVE_PROC_CPU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_ARM_L1_CACHE_SHIFT_6=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+CONFIG_HAVE_IRQ_WORK=y
+CONFIG_IRQ_WORK=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+CONFIG_BSD_PROCESS_ACCT=y
+# CONFIG_BSD_PROCESS_ACCT_V3 is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_HAVE_GENERIC_HARDIRQS is not set
+# CONFIG_SPARSE_IRQ is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TINY_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_CGROUPS is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_EXTRA_PASS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_ASHMEM=y
+CONFIG_AIO=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_PERF_COUNTERS is not set
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_COMPAT_BRK=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
+CONFIG_OPROFILE=y
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CNS3XXX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_DOVE is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_LPC32XX is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_NUC93X is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_SHMOBILE is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5P64X0 is not set
+# CONFIG_ARCH_S5P6442 is not set
+# CONFIG_ARCH_S5PC100 is not set
+# CONFIG_ARCH_S5PV210 is not set
+# CONFIG_ARCH_S5PV310 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_TCC_926 is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_U8500 is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_DAVINCI is not set
+CONFIG_ARCH_OMAP=y
+# CONFIG_PLAT_SPEAR is not set
+
+#
+# TI OMAP Common Features
+#
+CONFIG_ARCH_OMAP_OTG=y
+# CONFIG_ARCH_OMAP1 is not set
+CONFIG_ARCH_OMAP2PLUS=y
+
+#
+# OMAP Feature Selections
+#
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_SMARTREFLEX_CLASS3=y
+CONFIG_OMAP_RESET_CLOCKS=y
+CONFIG_OMAP_MUX=y
+CONFIG_OMAP_MUX_DEBUG=y
+CONFIG_OMAP_MUX_WARNINGS=y
+CONFIG_OMAP_MCBSP=y
+# CONFIG_OMAP_MBOX_FWK is not set
+CONFIG_OMAP_IOMMU=y
+# CONFIG_OMAP_IOMMU_DEBUG is not set
+# CONFIG_OMAP_MPU_TIMER is not set
+CONFIG_OMAP_32K_TIMER=y
+# CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE is not set
+CONFIG_OMAP_32K_TIMER_HZ=128
+CONFIG_OMAP_DM_TIMER=y
+# CONFIG_OMAP_PM_NONE is not set
+CONFIG_OMAP_PM_NOOP=y
+
+#
+# TI OMAP2/3/4 Specific Features
+#
+CONFIG_ARCH_OMAP2PLUS_TYPICAL=y
+# CONFIG_ARCH_OMAP2 is not set
+CONFIG_ARCH_OMAP3=y
+# CONFIG_ARCH_OMAP4 is not set
+# CONFIG_ARCH_TI81XX is not set
+CONFIG_ARCH_OMAP3430=y
+CONFIG_OMAP_PACKAGE_CBB=y
+
+#
+# OMAP Board Type
+#
+CONFIG_MACH_OMAP3_BEAGLE=y
+# CONFIG_MACH_DEVKIT8000 is not set
+# CONFIG_MACH_OMAP_LDP is not set
+# CONFIG_MACH_OMAP3530_LV_SOM is not set
+# CONFIG_MACH_OMAP3_TORPEDO is not set
+# CONFIG_MACH_OVERO is not set
+# CONFIG_MACH_OMAP3EVM is not set
+# CONFIG_MACH_FLASHBOARD is not set
+# CONFIG_MACH_OMAP3517EVM is not set
+# CONFIG_MACH_CRANEBOARD is not set
+# CONFIG_MACH_OMAP3_PANDORA is not set
+# CONFIG_MACH_OMAP3_TOUCHBOOK is not set
+# CONFIG_MACH_OMAP_3430SDP is not set
+# CONFIG_MACH_NOKIA_RM680 is not set
+# CONFIG_MACH_NOKIA_RX51 is not set
+# CONFIG_MACH_OMAP_ZOOM2 is not set
+# CONFIG_MACH_OMAP_ZOOM3 is not set
+# CONFIG_MACH_CM_T35 is not set
+# CONFIG_MACH_CM_T3517 is not set
+# CONFIG_MACH_IGEP0020 is not set
+# CONFIG_MACH_IGEP0030 is not set
+# CONFIG_MACH_SBC3530 is not set
+# CONFIG_MACH_OMAP_3630SDP is not set
+# CONFIG_OMAP3_EMU is not set
+CONFIG_OMAP3_PM_DISABLE_VT_SWITCH=y
+# CONFIG_OMAP3_SDRC_AC_TIMING is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32v6K=y
+CONFIG_CPU_V7=y
+CONFIG_CPU_32v7=y
+CONFIG_CPU_ABRT_EV7=y
+CONFIG_CPU_PABRT_V7=y
+CONFIG_CPU_CACHE_V7=y
+CONFIG_CPU_CACHE_VIPT=y
+CONFIG_CPU_COPY_V6=y
+CONFIG_CPU_TLB_V7=y
+CONFIG_CPU_HAS_ASID=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_ARM_L1_CACHE_SHIFT=6
+CONFIG_ARM_DMA_MEM_BUFFERABLE=y
+# CONFIG_ARM_ERRATA_430973 is not set
+# CONFIG_ARM_ERRATA_458693 is not set
+# CONFIG_ARM_ERRATA_460075 is not set
+# CONFIG_ARM_ERRATA_743622 is not set
+CONFIG_COMMON_CLKDEV=y
+# CONFIG_FIQ_DEBUGGER is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=128
+# CONFIG_THUMB2_KERNEL is not set
+CONFIG_AEABI=y
+CONFIG_OABI_COMPAT=y
+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_NEED_PER_CPU_KM=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_CC_STACKPROTECTOR is not set
+# CONFIG_DEPRECATED_PARAM_STRUCT is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
+# CONFIG_CMDLINE_FORCE is not set
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+# CONFIG_AUTO_ZRELADDR is not set
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_VFP=y
+CONFIG_VFPv3=y
+CONFIG_NEON=y
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management options
+#
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_VERBOSE is not set
+CONFIG_CAN_PM_TRACE=y
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND_NVS=y
+CONFIG_SUSPEND=y
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HAS_WAKELOCK=y
+CONFIG_HAS_EARLYSUSPEND=y
+CONFIG_WAKELOCK=y
+CONFIG_WAKELOCK_STAT=y
+CONFIG_USER_WAKELOCK=y
+CONFIG_EARLYSUSPEND=y
+# CONFIG_NO_USER_SPACE_SCREEN_ACCESS_CONTROL is not set
+# CONFIG_CONSOLE_EARLYSUSPEND is not set
+CONFIG_FB_EARLYSUSPEND=y
+# CONFIG_APM_EMULATION is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_PM_OPS=y
+CONFIG_ARCH_HAS_OPP=y
+CONFIG_PM_OPP=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE_DEMUX is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETLABEL is not set
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+CONFIG_DNS_RESOLVER=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NET_TCPPROBE is not set
+# CONFIG_NET_DROP_MONITOR is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+
+#
+# Some wireless drivers require a rate control algorithm
+#
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+CONFIG_MTD_OOPS=y
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_ECC=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_SM_COMMON is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_OMAP2=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+CONFIG_MTD_ONENAND=y
+CONFIG_MTD_ONENAND_VERIFY_WRITE=y
+# CONFIG_MTD_ONENAND_GENERIC is not set
+CONFIG_MTD_ONENAND_OMAP2=y
+# CONFIG_MTD_ONENAND_OTP is not set
+# CONFIG_MTD_ONENAND_2X_PROGRAM is not set
+# CONFIG_MTD_ONENAND_SIM is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_SMSC_PHY=y
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_BCM63XX_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
+# CONFIG_SMSC911X_ARCH_HOOKS is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+CONFIG_NETDEV_1000=y
+CONFIG_TI_DAVINCI_EMAC=y
+CONFIG_TI_DAVINCI_MDIO=y
+CONFIG_TI_DAVINCI_CPDMA=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NETDEV_10000=y
+CONFIG_WLAN=y
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_BCM4329 is not set
+# CONFIG_HOSTAP is not set
+CONFIG_WL12XX_PLATFORM_DATA=y
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_CDC_EEM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+CONFIG_USB_NET_SMSC95XX=y
+# CONFIG_USB_NET_GL620A is not set
+CONFIG_USB_NET_NET1080=y
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=y
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+# CONFIG_WAN is not set
+
+#
+# CAIF transport drivers
+#
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+# CONFIG_INPUT_KEYRESET is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+CONFIG_KEYBOARD_TWL4030=y
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_MOUSE_SYNAPTICS_I2C is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_QT602240 is not set
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI4_DEV=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYCHORD is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_TWL4030_PWRBUTTON=y
+# CONFIG_INPUT_TWL4030_VIBRA is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_GPIO is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_ADXL34X is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX3107 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_OMAP=y
+CONFIG_SERIAL_OMAP_CONSOLE=y
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TI81XX_HDMI is not set
+# CONFIG_DCC_TTY is not set
+# CONFIG_RAMOOPS is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_MUX is not set
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_OMAP=y
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_OMAP24XX=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+# CONFIG_GPIO_BASIC_MMIO is not set
+# CONFIG_GPIO_IT8761E is not set
+# CONFIG_GPIO_VX855 is not set
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_SX150X is not set
+CONFIG_GPIO_TWL4030=y
+# CONFIG_GPIO_ADP5588 is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_74X164 is not set
+
+#
+# AC97 GPIO expanders:
+#
+
+#
+# MODULbus GPIO expanders:
+#
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_BQ20Z75 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_CHARGER_ISP1704 is not set
+# CONFIG_CHARGER_TWL4030 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SMM665 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_OMAP_WATCHDOG=y
+CONFIG_TWL4030_WATCHDOG=y
+# CONFIG_MAX63XX_WATCHDOG is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_MFD_SUPPORT=y
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_HTC_I2CPLD is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+CONFIG_TWL4030_CORE=y
+CONFIG_TWL4030_POWER=y
+CONFIG_TWL4030_SCRIPT=y
+CONFIG_TWL4030_CODEC=y
+# CONFIG_TWL6030_PWM is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC35892 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13XXX is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_TPS6586X is not set
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+CONFIG_REGULATOR_DUMMY=y
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+CONFIG_REGULATOR_TWL4030=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_AD5398 is not set
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_DEV=y
+CONFIG_VIDEO_V4L2_COMMON=y
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_DVB_CORE is not set
+CONFIG_VIDEO_MEDIA=y
+
+#
+# Multimedia drivers
+#
+# CONFIG_IR_CORE is not set
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=y
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=y
+CONFIG_MEDIA_TUNER_TDA8290=y
+CONFIG_MEDIA_TUNER_TDA827X=y
+CONFIG_MEDIA_TUNER_TDA18271=y
+CONFIG_MEDIA_TUNER_TDA9887=y
+CONFIG_MEDIA_TUNER_TEA5761=y
+CONFIG_MEDIA_TUNER_TEA5767=y
+CONFIG_MEDIA_TUNER_MT20XX=y
+CONFIG_MEDIA_TUNER_XC2028=y
+CONFIG_MEDIA_TUNER_XC5000=y
+CONFIG_MEDIA_TUNER_MC44S803=y
+CONFIG_VIDEO_V4L2=y
+CONFIG_VIDEO_V4L1=y
+CONFIG_VIDEOBUF_GEN=y
+CONFIG_VIDEOBUF_DMA_CONTIG=y
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
+
+#
+# Encoders/decoders and other helper chips
+#
+
+#
+# Audio decoders
+#
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TDA9875 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_MSP3400 is not set
+# CONFIG_VIDEO_CS5345 is not set
+# CONFIG_VIDEO_CS53L32A is not set
+# CONFIG_VIDEO_M52790 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_WM8775 is not set
+# CONFIG_VIDEO_WM8739 is not set
+# CONFIG_VIDEO_VP27SMPX is not set
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_MT9T001 is not set
+CONFIG_VIDEO_MT9V011=y
+# CONFIG_VIDEO_MT9V032 is not set
+CONFIG_VIDEO_MT9V113=y
+# CONFIG_VIDEO_MT9T111 is not set
+# CONFIG_VIDEO_TCM825X is not set
+# CONFIG_VIDEO_SAA7110 is not set
+# CONFIG_VIDEO_SAA711X is not set
+# CONFIG_VIDEO_SAA717X is not set
+# CONFIG_VIDEO_SAA7191 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+# CONFIG_VIDEO_CX25840 is not set
+
+#
+# MPEG video encoders
+#
+# CONFIG_VIDEO_CX2341X is not set
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_SAA7127 is not set
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_THS7303 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_AK881X is not set
+
+#
+# Video improvement chips
+#
+# CONFIG_VIDEO_UPD64031A is not set
+# CONFIG_VIDEO_UPD64083 is not set
+# CONFIG_VIDEO_VPSS_SYSTEM is not set
+# CONFIG_VIDEO_VPFE_CAPTURE is not set
+CONFIG_VIDEO_OMAP2_VOUT=y
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SR030PC30 is not set
+CONFIG_VIDEO_OMAP3=y
+CONFIG_VIDEO_OMAP3_DEBUG=y
+# CONFIG_SOC_CAMERA is not set
+CONFIG_V4L_USB_DRIVERS=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+# CONFIG_USB_GSPCA is not set
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+# CONFIG_RADIO_ADAPTERS is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_TMIO is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS is not set
+CONFIG_OMAP2_DSS_DPI=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO=y
+# CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE is not set
+# CONFIG_OMAP2_DSS_SDI is not set
+CONFIG_OMAP2_DSS_DSI=y
+CONFIG_OMAP2_DSS_USE_DSI_PLL=y
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=1
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+CONFIG_FB_OMAP2_NUM_FBS=1
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+# CONFIG_PANEL_LGPHILIPS_LB035Q02 is not set
+# CONFIG_PANEL_SAMSUNG_LTE430WQ_F0C is not set
+CONFIG_PANEL_SHARP_LS037V7DW01=y
+# CONFIG_PANEL_SHARP_LQ043T1DG01 is not set
+# CONFIG_PANEL_SAMSUNG_LMS700KF23 is not set
+# CONFIG_PANEL_TAAL is not set
+# CONFIG_PANEL_TOPPOLY_TDO35S is not set
+# CONFIG_PANEL_TPO_TD043MTEA1 is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=y
+# CONFIG_LCD_S6E63M0 is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+# CONFIG_BACKLIGHT_ADP8860 is not set
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+CONFIG_LOGO=y
+CONFIG_LOGO_LINUX_MONO=y
+CONFIG_LOGO_LINUX_VGA16=y
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_SPI=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_CAIAQ is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_MCBSP=y
+CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_TWL4030=y
+# CONFIG_SND_SOC_WL1271BT is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_3M_PCT is not set
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX_FF is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CANDO is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_PRODIKEYS is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EGALAX is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_UCLOGIC is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MOSART is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_QUANTA is not set
+# CONFIG_HID_ROCCAT is not set
+# CONFIG_HID_ROCCAT_KONE is not set
+# CONFIG_HID_ROCCAT_PYRA is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_STANTUM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_OTG=y
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+CONFIG_USB_MUSB_HDRC=y
+
+#
+# Platform Glue Layer
+#
+# CONFIG_USB_MUSB_TUSB6010_GLUE is not set
+CONFIG_USB_MUSB_OMAP2PLUS_GLUE=y
+# CONFIG_USB_MUSB_AM35X_GLUE is not set
+# CONFIG_USB_MUSB_DAVINCI is not set
+# CONFIG_USB_MUSB_DA8XX is not set
+# CONFIG_USB_MUSB_TUSB6010 is not set
+CONFIG_USB_MUSB_OMAP2PLUS=y
+# CONFIG_USB_MUSB_AM35X is not set
+# CONFIG_USB_MUSB_TI81XX is not set
+# CONFIG_USB_MUSB_BLACKFIN is not set
+# CONFIG_USB_MUSB_UX500 is not set
+# CONFIG_USB_MUSB_HOST is not set
+# CONFIG_USB_MUSB_PERIPHERAL is not set
+CONFIG_USB_MUSB_OTG=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_MUSB_HDRC_HCD=y
+# CONFIG_MUSB_PIO_ONLY is not set
+CONFIG_USB_INVENTRA_DMA_HW=y
+# CONFIG_USB_TI_CPPI_DMA_HW is not set
+# CONFIG_USB_TI_CPPI41_DMA_HW is not set
+CONFIG_USB_INVENTRA_DMA=y
+CONFIG_MUSB_USE_SYSTEM_DMA_WORKAROUND=y
+# CONFIG_USB_TI_CPPI_DMA is not set
+# CONFIG_USB_TI_CPPI41_DMA is not set
+# CONFIG_USB_TUSB_OMAP_DMA is not set
+# CONFIG_USB_MUSB_DEBUG is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_UAS is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_R8A66597 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+CONFIG_USB_GADGET_DUALSPEED=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_ANDROID=y
+# CONFIG_USB_ANDROID_ACM is not set
+CONFIG_USB_ANDROID_ADB=y
+CONFIG_USB_ANDROID_MASS_STORAGE=y
+# CONFIG_USB_ANDROID_MTP is not set
+# CONFIG_USB_ANDROID_RNDIS is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+
+#
+# OTG and related infrastructure
+#
+CONFIG_USB_OTG_UTILS=y
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_ISP1301_OMAP is not set
+# CONFIG_USB_ULPI is not set
+CONFIG_TWL4030_USB=y
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_EMBEDDED_SDIO is not set
+# CONFIG_MMC_PARANOID_SD_INIT is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_MMC_BLOCK_DEFERRED_RESUME is not set
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_OMAP=y
+CONFIG_MMC_OMAP_HS=y
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+CONFIG_SWITCH=y
+# CONFIG_SWITCH_GPIO is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+CONFIG_RTC_INTF_ALARM=y
+CONFIG_RTC_INTF_ALARM_DEV=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_DS3232 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_ISL12022 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+CONFIG_RTC_DRV_TWL4030=y
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
+CONFIG_STAGING=y
+# CONFIG_STAGING_EXCLUDE_BUILD is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_USB_IP_COMMON is not set
+# CONFIG_ECHO is not set
+# CONFIG_BRCM80211 is not set
+# CONFIG_RT2870 is not set
+# CONFIG_COMEDI is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8712U is not set
+# CONFIG_TRANZPORT is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE=128
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE=16
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE=8
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL=0x11d
+# CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT is not set
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+# CONFIG_POHMELFS is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_VT6656 is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_IIO is not set
+# CONFIG_ZRAM is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_FB_SM7XX is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_ADIS16255 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_EASYCAP is not set
+# CONFIG_TIDSPBRIDGE is not set
+# CONFIG_WESTBRIDGE is not set
+CONFIG_WESTBRIDGE_HAL_SELECTED=y
+CONFIG_MACH_OMAP3_WESTBRIDGE_AST_PNAND_HAL=y
+# CONFIG_MACH_NO_WESTBRIDGE is not set
+# CONFIG_ATH6K_LEGACY is not set
+# CONFIG_USB_ENESTORAGE is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_SPEAKUP is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_YAFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+# CONFIG_NFS_V4_1 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_USE_LEGACY_DNS=y
+# CONFIG_NFS_USE_NEW_IDMAPPER is not set
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+CONFIG_BKL=y
+# CONFIG_SPARSE_RCU_POINTER is not set
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_LKDTM is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_NOP_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
+CONFIG_RING_BUFFER_ALLOW_SWAP=y
+CONFIG_TRACING=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+CONFIG_KPROBE_EVENT=y
+# CONFIG_RING_BUFFER_BENCHMARK is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_STRICT_DEVMEM is not set
+# CONFIG_ARM_UNWIND is not set
+# CONFIG_DEBUG_USER is not set
+# CONFIG_OC_ETM is not set
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_NETWORK is not set
+# CONFIG_SECURITY_PATH is not set
+# CONFIG_SECURITY_TOMOYO is not set
+# CONFIG_SECURITY_APPARMOR is not set
+# CONFIG_IMA is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_OMAP_SHAM is not set
+# CONFIG_CRYPTO_DEV_OMAP_AES is not set
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=y
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/kernel/arch/arm/configs/panda_defconfig b/kernel/arch/arm/configs/panda_defconfig
new file mode 100644
index 000000000000..4c5e56c56cf6
--- /dev/null
+++ b/kernel/arch/arm/configs/panda_defconfig
@@ -0,0 +1,331 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_ASHMEM=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_OMAP=y
+CONFIG_OMAP_RESET_CLOCKS=y
+# CONFIG_ARCH_OMAP2 is not set
+# CONFIG_ARCH_OMAP3 is not set
+# CONFIG_MACH_OMAP_4430SDP is not set
+CONFIG_ARM_THUMBEE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
+# CONFIG_SMP_ON_UP is not set
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_CMDLINE="console=ttyO2,115200n8 mem=1G androidboot.console=ttyO2"
+CONFIG_CMDLINE_EXTEND=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_IDLE=y
+CONFIG_OMAP_SMARTREFLEX=y
+CONFIG_OMAP_SMARTREFLEX_CLASS1P5=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_TUNNEL=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_LOG=y
+CONFIG_NF_NAT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_TARGET_LOG=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_PHONET=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_BT=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_L2CAP=y
+CONFIG_BT_SCO=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_WILINK=y
+CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_ONENAND=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_MISC_DEVICES=y
+# CONFIG_ANDROID_PMEM is not set
+CONFIG_KERNEL_DEBUGGER_CORE=y
+CONFIG_UID_STAT=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_IFB=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_PPP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI4_DEV=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_TWL6030_PWM=y
+CONFIG_REGULATOR_TWL4030=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_PVR_SGX=y
+CONFIG_PVR_NEED_PVR_DPF=y
+CONFIG_PVR_NEED_PVR_ASSERT=y
+CONFIG_PVR_USSE_EDM_STATUS_DEBUG=y
+CONFIG_FB=y
+CONFIG_OMAP2_DSS=y
+# CONFIG_OMAP2_DSS_VENC is not set
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_NUM_FBS=2
+CONFIG_OMAP2_VRAM_SIZE=16
+CONFIG_PANEL_GENERIC_DPI=y
+CONFIG_DISPLAY_SUPPORT=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DEVICEFS=y
+CONFIG_USB_SUSPEND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_OMAP2PLUS=y
+CONFIG_USB_MUSB_PERIPHERAL=y
+CONFIG_USB_GADGET_MUSB_HDRC=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_OMAP=y
+CONFIG_MMC_OMAP_HS=y
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_RAM_CONSOLE=y
+CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_XATTR is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_ARM_UNWIND is not set
+CONFIG_DEBUG_USER=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRC_CCITT=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_OMAP_SOC=y
+CONFIG_SND_OMAP_SOC_SDP4430=y
+CONFIG_SND_OMAP_SOC_OMAP4_HDMI=y
+CONFIG_OMAP_HSI=y
+CONFIG_OMAP_HSI_DEVICE=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_LIB80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_USB_ZD1201=y
+CONFIG_WL12XX_MENU=y
+CONFIG_WL12XX=y
+CONFIG_WL12XX_SDIO=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_OMAP_TEMP_SENSOR=y
+CONFIG_OMAP_DIE_TEMP_SENSOR=y
+CONFIG_TI_ST=y
+CONFIG_KEYBOARD_GPIO=y
diff --git a/kernel/arch/arm/mach-omap2/board-omap3beagle.c b/kernel/arch/arm/mach-omap2/board-omap3beagle.c
new file mode 100644
index 000000000000..b3d1b81b2a2e
--- /dev/null
+++ b/kernel/arch/arm/mach-omap2/board-omap3beagle.c
@@ -0,0 +1,1038 @@
+/*
+ * linux/arch/arm/mach-omap2/board-omap3beagle.c
+ *
+ * Copyright (C) 2008 Texas Instruments
+ *
+ * Modified from mach-omap2/board-3430sdp.c
+ *
+ * Initial code: Syed Mohammed Khasim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+#include <linux/mmc/host.h>
+
+#include <linux/usb/android_composite.h>
+
+#include <linux/regulator/machine.h>
+#include <linux/i2c/twl.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/display.h>
+#include <plat/gpmc.h>
+#include <plat/nand.h>
+#include <plat/usb.h>
+
+#include "mux.h"
+#include "hsmmc.h"
+#include "timer-gp.h"
+#include "board-flash.h"
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+#include <linux/input/synaptics_dsx.h>
+
+#define TM_SAMPLE1 (1) /* 2D only */
+#define TM_SAMPLE2 (2) /* 2D + 0D x 2 */
+#define TM_SAMPLE3 (3) /* 2D + 0D x 4 */
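+/*
+ * SYNAPTICS_MODULE selects which of the sample modules above is built in;
+ * the preprocessor conditionals further down pick the matching 0D button
+ * map and rmi4_platformdata for that module.
+ */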
+#define SYNAPTICS_MODULE TM_SAMPLE1
+#endif
+
+#define NAND_BLOCK_SIZE SZ_128K
+
+#ifdef CONFIG_USB_ANDROID
+#define GOOGLE_VENDOR_ID 0x18d1
+#define GOOGLE_PRODUCT_ID 0x9018
+#define GOOGLE_ADB_PRODUCT_ID 0x9015
+#endif
+
+/* Synaptics Thin Driver */
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+static int synaptics_gpio_setup(unsigned gpio, bool configure)
+{
+	int retval = 0;
+
+	if (configure) {
+		retval = gpio_request(gpio, "rmi4_attn");
+		if (retval) {
+			pr_err("%s: Failed to get attn gpio %d. Code: %d.\n",
+				__func__, gpio, retval);
+			return retval;
+		}
+		omap_mux_init_signal("sdmmc2_clk.gpio_130", OMAP_PIN_INPUT_PULLUP);
+
+		retval = gpio_direction_input(gpio);
+		if (retval) {
+			pr_err("%s: Failed to set up attn gpio %d. Code: %d.\n",
+				__func__, gpio, retval);
+			gpio_free(gpio);
+		}
+	} else {
+		pr_warn("%s: No way to deconfigure gpio %d.\n",
+			__func__, gpio);
+	}
+
+ return retval;
+}
+
+ #if (SYNAPTICS_MODULE == TM_SAMPLE1)
+#define TM_SAMPLE1_ADDR 0x20
+#define TM_SAMPLE1_ATTN 130
+
+static unsigned char TM_SAMPLE1_f1a_button_codes[] = {};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE1_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE1_f1a_button_codes),
+ .map = TM_SAMPLE1_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE1_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE1_capacitance_button_map,
+};
+
+static struct i2c_board_info bus2_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE1_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+
+#elif (SYNAPTICS_MODULE == TM_SAMPLE2)
+#define TM_SAMPLE2_ADDR 0x20
+#define TM_SAMPLE2_ATTN 130
+
+static unsigned char TM_SAMPLE2_f1a_button_codes[] = {KEY_MENU, KEY_BACK};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE2_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE2_f1a_button_codes),
+ .map = TM_SAMPLE2_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE2_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE2_capacitance_button_map,
+};
+
+static struct i2c_board_info bus2_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE2_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+
+#elif (SYNAPTICS_MODULE == TM_SAMPLE3)
+#define TM_SAMPLE3_ADDR 0x20
+#define TM_SAMPLE3_ATTN 130
+
+static unsigned char TM_SAMPLE3_f1a_button_codes[] = {
+	KEY_MENU, KEY_HOME, KEY_BACK, KEY_SEARCH};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE3_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE3_f1a_button_codes),
+ .map = TM_SAMPLE3_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE3_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE3_capacitance_button_map,
+};
+
+static struct i2c_board_info bus2_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE3_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+#endif
+
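+/*
+ * Register the Synaptics controller on I2C bus 2.  i2c_register_board_info()
+ * has to run before the corresponding I2C adapter is registered, so this is
+ * meant to be called from the board's __init sequence.
+ */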
+void __init i2c_device_setup(void)
+{
+	pr_info(">>>> I2C device setup\n");
+ if (ARRAY_SIZE(bus2_i2c_devices)) {
+ i2c_register_board_info(2, bus2_i2c_devices,
+ ARRAY_SIZE(bus2_i2c_devices));
+ }
+}
+
+/* End of Synaptics change for beagle board */
+
+static char *usb_functions_adb[] = {
+ "adb",
+};
+
+static char *usb_functions_mass_storage[] = {
+ "usb_mass_storage",
+};
+static char *usb_functions_ums_adb[] = {
+ "usb_mass_storage",
+ "adb",
+};
+
+static char *usb_functions_all[] = {
+ "adb", "usb_mass_storage",
+};
+
+static struct android_usb_product usb_products[] = {
+ {
+ .product_id = GOOGLE_PRODUCT_ID,
+ .num_functions = ARRAY_SIZE(usb_functions_adb),
+ .functions = usb_functions_adb,
+ },
+ {
+ .product_id = GOOGLE_PRODUCT_ID,
+ .num_functions = ARRAY_SIZE(usb_functions_mass_storage),
+ .functions = usb_functions_mass_storage,
+ },
+ {
+ .product_id = GOOGLE_PRODUCT_ID,
+ .num_functions = ARRAY_SIZE(usb_functions_ums_adb),
+ .functions = usb_functions_ums_adb,
+ },
+};
+
+static struct usb_mass_storage_platform_data mass_storage_pdata = {
+ .nluns = 1,
+ .vendor = "rowboat",
+ .product = "rowboat gadget",
+ .release = 0x100,
+};
+
+static struct platform_device usb_mass_storage_device = {
+ .name = "usb_mass_storage",
+ .id = -1,
+ .dev = {
+ .platform_data = &mass_storage_pdata,
+ },
+};
+
+static struct android_usb_platform_data android_usb_pdata = {
+ .vendor_id = GOOGLE_VENDOR_ID,
+ .product_id = GOOGLE_PRODUCT_ID,
+ .functions = usb_functions_all,
+ .products = usb_products,
+ .num_products = ARRAY_SIZE(usb_products),
+ .version = 0x0100,
+ .product_name = "rowboat gadget",
+ .manufacturer_name = "rowboat",
+ .serial_number = "20100720",
+ .num_functions = ARRAY_SIZE(usb_functions_all),
+};
+
+static struct platform_device androidusb_device = {
+ .name = "android_usb",
+ .id = -1,
+ .dev = {
+ .platform_data = &android_usb_pdata,
+ },
+};
+
+static void omap3beagle_android_gadget_init(void)
+{
+ platform_device_register(&androidusb_device);
+}
+#endif
+/*
+ * OMAP3 Beagle revision
+ * Run-time detection of the Beagle revision is done by reading GPIOs 171-173.
+ * GPIO ID -
+ * AXBX = GPIO173, GPIO172, GPIO171: 1 1 1
+ * C1_3 = GPIO173, GPIO172, GPIO171: 1 1 0
+ * C4   = GPIO173, GPIO172, GPIO171: 1 0 1
+ * XMC  = GPIO173, GPIO172, GPIO171: 0 1 0
+ * XM   = GPIO173, GPIO172, GPIO171: 0 0 0
+ */
+enum {
+ OMAP3BEAGLE_BOARD_UNKN = 0,
+ OMAP3BEAGLE_BOARD_AXBX,
+ OMAP3BEAGLE_BOARD_C1_3,
+ OMAP3BEAGLE_BOARD_C4,
+ OMAP3BEAGLE_BOARD_XM,
+ OMAP3BEAGLE_BOARD_XMC,
+};
+
+extern void omap_pm_sys_offmode_select(int);
+extern void omap_pm_sys_offmode_pol(int);
+extern void omap_pm_sys_clkreq_pol(int);
+extern void omap_pm_auto_off(int);
+extern void omap_pm_auto_ret(int);
+
+static u8 omap3_beagle_version;
+
+static u8 omap3_beagle_get_rev(void)
+{
+ return omap3_beagle_version;
+}
+
+/**
+ * Board specific initialization of PM components
+ */
+static void __init omap3_beagle_pm_init(void)
+{
+ /* Use sys_offmode signal */
+ omap_pm_sys_offmode_select(1);
+
+ /* sys_clkreq - active high */
+ omap_pm_sys_clkreq_pol(1);
+
+ /* sys_offmode - active low */
+ omap_pm_sys_offmode_pol(0);
+
+ /* Automatically send OFF command */
+ omap_pm_auto_off(1);
+
+ /* Automatically send RET command */
+ omap_pm_auto_ret(1);
+}
+
+static void __init omap3_beagle_init_rev(void)
+{
+ int ret;
+ u16 beagle_rev = 0;
+
+ omap_mux_init_gpio(171, OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP);
+
+ ret = gpio_request(171, "rev_id_0");
+ if (ret < 0)
+ goto fail0;
+
+ ret = gpio_request(172, "rev_id_1");
+ if (ret < 0)
+ goto fail1;
+
+ ret = gpio_request(173, "rev_id_2");
+ if (ret < 0)
+ goto fail2;
+
+ gpio_direction_input(171);
+ gpio_direction_input(172);
+ gpio_direction_input(173);
+
+ beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
+ | (gpio_get_value(173) << 2);
+
+ switch (beagle_rev) {
+ case 7:
+ printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
+ break;
+ case 6:
+ printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
+ break;
+ case 5:
+ printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
+ break;
+ case 2:
+ printk(KERN_INFO "OMAP3 Beagle Rev: xM C\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_XMC;
+ break;
+ case 0:
+ printk(KERN_INFO "OMAP3 Beagle Rev: xM\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_XM;
+ break;
+ default:
+ printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev);
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
+ }
+
+ return;
+
+fail2:
+ gpio_free(172);
+fail1:
+ gpio_free(171);
+fail0:
+ printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
+ omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
+
+ return;
+}
+
+static struct mtd_partition omap3beagle_nand_partitions[] = {
+ /* All the partition sizes are listed in terms of NAND block size */
+ {
+ .name = "X-Loader",
+ .offset = 0,
+ .size = 4 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "U-Boot",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x80000 */
+ .size = 15 * NAND_BLOCK_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "U-Boot Env",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x260000 */
+ .size = 1 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "Kernel",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x280000 */
+ .size = 32 * NAND_BLOCK_SIZE,
+ },
+ {
+ .name = "File System",
+ .offset = MTDPART_OFS_APPEND, /* Offset = 0x680000 */
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+/* DSS */
+
+static int beagle_enable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 1);
+
+ return 0;
+}
+
+static void beagle_disable_dvi(struct omap_dss_device *dssdev)
+{
+ if (gpio_is_valid(dssdev->reset_gpio))
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+static struct omap_dss_device beagle_dvi_device = {
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .name = "dvi",
+ .driver_name = "generic_panel",
+ .phy.dpi.data_lines = 24,
+ .reset_gpio = -EINVAL,
+ .platform_enable = beagle_enable_dvi,
+ .platform_disable = beagle_disable_dvi,
+};
+
+static struct omap_dss_device beagle_tv_device = {
+ .name = "tv",
+ .driver_name = "venc",
+ .type = OMAP_DISPLAY_TYPE_VENC,
+ .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+static struct omap_dss_device *beagle_dss_devices[] = {
+ &beagle_dvi_device,
+ &beagle_tv_device,
+};
+
+static struct omap_dss_board_info beagle_dss_data = {
+ .num_devices = ARRAY_SIZE(beagle_dss_devices),
+ .devices = beagle_dss_devices,
+ .default_device = &beagle_dvi_device,
+};
+
+static struct platform_device beagle_dss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .platform_data = &beagle_dss_data,
+ },
+};
+
+static struct regulator_consumer_supply beagle_vdac_supply =
+ REGULATOR_SUPPLY("vdda_dac", "omapdss");
+
+static struct regulator_consumer_supply beagle_vdvi_supply =
+ REGULATOR_SUPPLY("vdds_dsi", "omapdss");
+
+static void __init beagle_display_init(void)
+{
+ int r;
+
+ r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
+ if (r < 0) {
+ printk(KERN_ERR "Unable to get DVI reset GPIO\n");
+ return;
+ }
+
+ gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
+}
+
+#include "sdram-micron-mt46h32m32lf-6.h"
+
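+/*
+ * MMC1 write-protect defaults to GPIO 29; beagle_twl_gpio_setup() below
+ * overrides it per revision (disabled on xM/xM C, GPIO 23 on C1-C4 boards).
+ */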
+static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .gpio_wp = 29,
+ },
+ {} /* Terminator */
+};
+
+static struct regulator_consumer_supply beagle_vmmc1_supply = {
+ .supply = "vmmc",
+};
+
+static struct regulator_consumer_supply beagle_vsim_supply = {
+ .supply = "vmmc_aux",
+};
+
+static struct regulator_consumer_supply beagle_vaux3_supply = {
+ .supply = "cam_1v8",
+};
+
+static struct regulator_consumer_supply beagle_vaux4_supply = {
+ .supply = "cam_2v8",
+};
+
+static struct gpio_led gpio_leds[];
+
+static int beagle_twl_gpio_setup(struct device *dev,
+ unsigned gpio, unsigned ngpio)
+{
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM || omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XMC) {
+ mmc[0].gpio_wp = -EINVAL;
+ } else if ((omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C1_3) ||
+ (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_C4)) {
+ omap_mux_init_gpio(23, OMAP_PIN_INPUT);
+ mmc[0].gpio_wp = 23;
+ } else {
+ omap_mux_init_gpio(29, OMAP_PIN_INPUT);
+ }
+ /* gpio + 0 is "mmc0_cd" (input/IRQ) */
+ mmc[0].gpio_cd = gpio + 0;
+ omap2_hsmmc_init(mmc);
+
+ /* link regulators to MMC adapters */
+ beagle_vmmc1_supply.dev = mmc[0].dev;
+ beagle_vsim_supply.dev = mmc[0].dev;
+
+ /* REVISIT: need ehci-omap hooks for external VBUS
+ * power switch and overcurrent detect
+ */
+	if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM &&
+			omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XMC) {
+ gpio_request(gpio + 1, "EHCI_nOC");
+ gpio_direction_input(gpio + 1);
+ }
+
+ /*
+ * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
+ * high / others active low)
+ */
+ gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
+ else
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+
+ /* DVI reset GPIO is different between beagle revisions */
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM || omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XMC)
+ beagle_dvi_device.reset_gpio = 129;
+ else
+ beagle_dvi_device.reset_gpio = 170;
+
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
+ /* Power on camera interface */
+ gpio_request(gpio + 2, "CAM_EN");
+ gpio_direction_output(gpio + 2, 1);
+
+ /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+ gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
+ } else {
+ gpio_request(gpio + 1, "EHCI_nOC");
+ gpio_direction_input(gpio + 1);
+
+ /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
+ gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
+ gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+ }
+ /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
+ gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
+
+ /*
+ * gpio + 1 on Xm controls the TFP410's enable line (active low)
+ * gpio + 2 control varies depending on the board rev as follows:
+ * P7/P8 revisions(prototype): Camera EN
+ * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
+ */
+ if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM || omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XMC) {
+ gpio_request(gpio + 1, "nDVI_PWR_EN");
+ gpio_direction_output(gpio + 1, 0);
+ gpio_request(gpio + 2, "DVI_LDO_EN");
+ gpio_direction_output(gpio + 2, 1);
+ }
+
+ return 0;
+}
+
+static struct twl4030_gpio_platform_data beagle_gpio_data = {
+ .gpio_base = OMAP_MAX_GPIO_LINES,
+ .irq_base = TWL4030_GPIO_IRQ_BASE,
+ .irq_end = TWL4030_GPIO_IRQ_END,
+ .use_leds = true,
+ .pullups = BIT(1),
+ .pulldowns = BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+ | BIT(15) | BIT(16) | BIT(17),
+ .setup = beagle_twl_gpio_setup,
+};
+
+/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
+static struct regulator_init_data beagle_vmmc1 = {
+ .constraints = {
+ .min_uV = 1850000,
+ .max_uV = 3150000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vmmc1_supply,
+};
+
+/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
+static struct regulator_init_data beagle_vsim = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vsim_supply,
+};
+
+/* VDAC for DSS driving S-Video (8 mA unloaded, max 65 mA) */
+static struct regulator_init_data beagle_vdac = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vdac_supply,
+};
+
+/* VPLL2 for digital video outputs */
+static struct regulator_init_data beagle_vpll2 = {
+ .constraints = {
+ .name = "VDVI",
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vdvi_supply,
+};
+
+/* VAUX3 for CAM_1V8 */
+static struct regulator_init_data beagle_vaux3 = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vaux3_supply,
+};
+
+/* VAUX4 for CAM_2V8 */
+static struct regulator_init_data beagle_vaux4 = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &beagle_vaux4_supply,
+};
+
+static struct twl4030_usb_data beagle_usb_data = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
+/**
+ * Macro to configure resources
+ */
+#define TWL4030_RESCONFIG(res,grp,typ1,typ2,state) \
+ { \
+ .resource = res, \
+ .devgroup = grp, \
+ .type = typ1, \
+ .type2 = typ2, \
+ .remap_sleep = state \
+ }
+
+static struct twl4030_resconfig __initdata board_twl4030_rconfig[] = {
+ TWL4030_RESCONFIG(RES_VPLL1, DEV_GRP_P1, 3, 1, RES_STATE_OFF), /* ? */
+ TWL4030_RESCONFIG(RES_VINTANA1, DEV_GRP_ALL, 1, 2, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_VINTANA2, DEV_GRP_ALL, 0, 2, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_VINTDIG, DEV_GRP_ALL, 1, 2, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_VIO, DEV_GRP_ALL, 2, 2, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_VDD1, DEV_GRP_P1, 4, 1, RES_STATE_OFF), /* ? */
+ TWL4030_RESCONFIG(RES_VDD2, DEV_GRP_P1, 3, 1, RES_STATE_OFF), /* ? */
+ TWL4030_RESCONFIG(RES_REGEN, DEV_GRP_ALL, 2, 1, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_NRES_PWRON, DEV_GRP_ALL, 0, 1, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_CLKEN, DEV_GRP_ALL, 3, 2, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_SYSEN, DEV_GRP_ALL, 6, 1, RES_STATE_SLEEP),
+ TWL4030_RESCONFIG(RES_HFCLKOUT, DEV_GRP_P3, 0, 2, RES_STATE_SLEEP), /* ? */
+ TWL4030_RESCONFIG(0, 0, 0, 0, 0),
+};
+
+/**
+ * Optimized 'Active to Sleep' sequence
+ */
+static struct twl4030_ins omap3beagle_sleep_seq[] __initdata = {
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_HFCLKOUT, RES_STATE_SLEEP), 20},
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1, RES_STATE_SLEEP), 2 },
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2, RES_STATE_SLEEP), 2 },
+};
+
+static struct twl4030_script omap3beagle_sleep_script __initdata = {
+ .script = omap3beagle_sleep_seq,
+ .size = ARRAY_SIZE(omap3beagle_sleep_seq),
+ .flags = TWL4030_SLEEP_SCRIPT,
+};
+
+/**
+ * Optimized 'Sleep to Active (P12)' sequence
+ */
+static struct twl4030_ins omap3beagle_wake_p12_seq[] __initdata = {
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R1, RES_STATE_ACTIVE), 2 }
+};
+
+static struct twl4030_script omap3beagle_wake_p12_script __initdata = {
+ .script = omap3beagle_wake_p12_seq,
+ .size = ARRAY_SIZE(omap3beagle_wake_p12_seq),
+ .flags = TWL4030_WAKEUP12_SCRIPT,
+};
+
+/**
+ * Optimized 'Sleep to Active' (P3) sequence
+ */
+static struct twl4030_ins omap3beagle_wake_p3_seq[] __initdata = {
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2, RES_STATE_ACTIVE), 2 }
+};
+
+static struct twl4030_script omap3beagle_wake_p3_script __initdata = {
+ .script = omap3beagle_wake_p3_seq,
+ .size = ARRAY_SIZE(omap3beagle_wake_p3_seq),
+ .flags = TWL4030_WAKEUP3_SCRIPT,
+};
+
+/**
+ * Optimized warm reset sequence (for less power surge)
+ */
+static struct twl4030_ins omap3beagle_wrst_seq[] __initdata = {
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_OFF), 0x2 },
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_MAIN_REF, RES_STATE_WRST), 2 },
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_ALL, RES_TYPE_R0, RES_TYPE2_R2, RES_STATE_WRST), 0x2},
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_VUSB_3V1, RES_STATE_WRST), 0x2 },
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_VPLL1, RES_STATE_WRST), 0x2 },
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_VDD2, RES_STATE_WRST), 0x7 },
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_VDD1, RES_STATE_WRST), 0x25 },
+ { MSG_BROADCAST(DEV_GRP_NULL, RES_GRP_RC, RES_TYPE_ALL, RES_TYPE2_R0, RES_STATE_WRST), 0x2 },
+ { MSG_SINGULAR(DEV_GRP_NULL, RES_RESET, RES_STATE_ACTIVE), 0x2 },
+
+};
+
+static struct twl4030_script omap3beagle_wrst_script __initdata = {
+ .script = omap3beagle_wrst_seq,
+ .size = ARRAY_SIZE(omap3beagle_wrst_seq),
+ .flags = TWL4030_WRST_SCRIPT,
+};
+
+static struct twl4030_script __initdata *board_twl4030_scripts[] = {
+ &omap3beagle_wake_p12_script,
+ &omap3beagle_wake_p3_script,
+ &omap3beagle_sleep_script,
+ &omap3beagle_wrst_script
+};
+
+static struct twl4030_power_data __initdata omap3beagle_script_data = {
+ .scripts = board_twl4030_scripts,
+ .num = ARRAY_SIZE(board_twl4030_scripts),
+ .resource_config = board_twl4030_rconfig,
+};
+
+static struct twl4030_codec_audio_data beagle_audio_data = {
+ .audio_mclk = 26000000,
+ .digimic_delay = 1,
+ .ramp_delay_value = 1,
+ .offset_cncl_path = 1,
+ .check_defaults = false,
+	.reset_registers = false,
+};
+
+static struct twl4030_codec_data beagle_codec_data = {
+ .audio_mclk = 26000000,
+ .audio = &beagle_audio_data,
+};
+
+static struct twl4030_platform_data beagle_twldata = {
+ .irq_base = TWL4030_IRQ_BASE,
+ .irq_end = TWL4030_IRQ_END,
+
+ /* platform_data for children goes here */
+ .usb = &beagle_usb_data,
+ .gpio = &beagle_gpio_data,
+ .codec = &beagle_codec_data,
+ .vmmc1 = &beagle_vmmc1,
+ .vsim = &beagle_vsim,
+ .vdac = &beagle_vdac,
+ .vpll2 = &beagle_vpll2,
+ .vaux3 = &beagle_vaux3,
+ .vaux4 = &beagle_vaux4,
+ .power = &omap3beagle_script_data,
+};
+
+static struct i2c_board_info __initdata beagle_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("twl4030", 0x48),
+ .flags = I2C_CLIENT_WAKE,
+ .irq = INT_34XX_SYS_NIRQ,
+ .platform_data = &beagle_twldata,
+ },
+};
+
+static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
+ {
+ I2C_BOARD_INFO("eeprom", 0x50),
+ },
+};
+
+static int __init omap3_beagle_i2c_init(void)
+{
+ omap_register_i2c_bus(1, 2600, beagle_i2c_boardinfo,
+ ARRAY_SIZE(beagle_i2c_boardinfo));
+
+ /* Bus 2 is used for Camera/Sensor interface */
+ if (ARRAY_SIZE(bus2_i2c_devices))
+ omap_register_i2c_bus(2, 400, bus2_i2c_devices,
+ ARRAY_SIZE(bus2_i2c_devices));
+ else
+ omap_register_i2c_bus(2, 400, NULL, 0);
+
+ /* Bus 3 is attached to the DVI port where devices like the pico DLP
+ * projector don't work reliably with 400kHz */
+ omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom));
+
+ return 0;
+}
+
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "beagleboard::usr0",
+ .default_trigger = "heartbeat",
+ .gpio = 150,
+ },
+ {
+ .name = "beagleboard::usr1",
+ .default_trigger = "mmc0",
+ .gpio = 149,
+ },
+ {
+ .name = "beagleboard::pmu_stat",
+ .gpio = -EINVAL, /* gets replaced */
+ .active_low = true,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ },
+};
+
+static struct gpio_keys_button gpio_buttons[] = {
+ {
+ .code = KEY_POWER,
+ .gpio = 4,
+ .desc = "user",
+ .wakeup = 1,
+ },
+};
+
+static struct gpio_keys_platform_data gpio_key_info = {
+ .buttons = gpio_buttons,
+ .nbuttons = ARRAY_SIZE(gpio_buttons),
+};
+
+static struct platform_device keys_gpio = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_key_info,
+ },
+};
+
+static void __init omap3_beagle_init_irq(void)
+{
+ omap2_init_common_infrastructure();
+ omap2_init_common_devices(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
+ omap_init_irq();
+ gpmc_init();
+#ifdef CONFIG_OMAP_32K_TIMER
+ if (omap3_beagle_version == OMAP3BEAGLE_BOARD_AXBX)
+ omap2_gp_clockevent_set_gptimer(12);
+ else
+ omap2_gp_clockevent_set_gptimer(1);
+#endif
+}
+
+static struct platform_device *omap3_beagle_devices[] __initdata = {
+ &leds_gpio,
+ &keys_gpio,
+ &beagle_dss_device,
+ &usb_mass_storage_device,
+};
+
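+/*
+ * Scan the GPMC chip-selects for one configured as NAND and register the
+ * BeagleBoard NAND partitions on it.
+ */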
+static void __init omap3beagle_flash_init(void)
+{
+ u8 cs = 0;
+ u8 nandcs = GPMC_CS_NUM + 1;
+
+ /* find out the chip-select on which NAND exists */
+ while (cs < GPMC_CS_NUM) {
+ u32 ret = 0;
+ ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+ if ((ret & 0xC00) == 0x800) {
+ printk(KERN_INFO "Found NAND on CS%d\n", cs);
+ if (nandcs > GPMC_CS_NUM)
+ nandcs = cs;
+ }
+ cs++;
+ }
+
+ if (nandcs > GPMC_CS_NUM) {
+ printk(KERN_INFO "NAND: Unable to find configuration "
+				"in GPMC\n");
+ return;
+ }
+
+ if (nandcs < GPMC_CS_NUM) {
+ printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
+ board_nand_init(omap3beagle_nand_partitions,
+ ARRAY_SIZE(omap3beagle_nand_partitions),
+ nandcs, NAND_BUSWIDTH_16);
+ }
+}
+
+static const struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+
+ .phy_reset = true,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = 147,
+ .reset_gpio_port[2] = -EINVAL
+};
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
+ OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
+ OMAP_PIN_OFF_WAKEUPENABLE),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+#endif
+
+static struct omap_musb_board_data musb_board_data = {
+ .interface_type = MUSB_INTERFACE_ULPI,
+ .mode = MUSB_OTG,
+ .power = 100,
+};
+
+static void __init omap3_beagle_init(void)
+{
+ omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
+ omap3_beagle_init_rev();
+ omap3_beagle_i2c_init();
+ platform_add_devices(omap3_beagle_devices,
+ ARRAY_SIZE(omap3_beagle_devices));
+ omap_serial_init();
+
+ omap_mux_init_gpio(170, OMAP_PIN_INPUT);
+ gpio_request(170, "DVI_nPD");
+ /* REVISIT leave DVI powered down until it's needed ... */
+ gpio_direction_output(170, true);
+
+ usb_musb_init(&musb_board_data);
+ usb_ehci_init(&ehci_pdata);
+ omap3beagle_flash_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
+ omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
+
+ beagle_display_init();
+#ifdef CONFIG_USB_ANDROID
+ omap3beagle_android_gadget_init();
+#endif
+ omap3_beagle_pm_init();
+}
+
+MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
+ /* Maintainer: Syed Mohammed Khasim - http://beagleboard.org */
+ .boot_params = 0x80000100,
+ .map_io = omap3_map_io,
+ .reserve = omap_reserve,
+ .init_irq = omap3_beagle_init_irq,
+ .init_machine = omap3_beagle_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/kernel/arch/arm/mach-omap2/board-omap4panda.c b/kernel/arch/arm/mach-omap2/board-omap4panda.c
new file mode 100644
index 000000000000..4f8c79ddd650
--- /dev/null
+++ b/kernel/arch/arm/mach-omap2/board-omap4panda.c
@@ -0,0 +1,1053 @@
+/*
+ * Board support file for OMAP4430 based PandaBoard.
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author: David Anders <x0132446@ti.com>
+ *
+ * Based on mach-omap2/board-4430sdp.c
+ *
+ * Author: Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * Based on mach-omap2/board-3430sdp.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/omapfb.h>
+#include <linux/reboot.h>
+#include <linux/usb/otg.h>
+#include <linux/i2c/twl.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/wl12xx.h>
+#include <linux/memblock.h>
+#include <linux/skbuff.h>
+#include <linux/ti_wilink_st.h>
+#include <linux/platform_data/ram_console.h>
+
+#include <mach/hardware.h>
+#include <mach/omap4-common.h>
+#include <mach/emif.h>
+#include <mach/lpddr2-elpida.h>
+#include <mach/dmm.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <video/omapdss.h>
+
+#include <plat/board.h>
+#include <plat/common.h>
+#include <plat/usb.h>
+#include <plat/mmc.h>
+#include <plat/remoteproc.h>
+#include <plat/vram.h>
+#include <video/omap-panel-generic-dpi.h>
+#include "timer-gp.h"
+
+#include "hsmmc.h"
+#include "control.h"
+#include "mux.h"
+#include "common-board-devices.h"
+#include "prm-regbits-44xx.h"
+#include "prm44xx.h"
+#include "pm.h"
+#include "resetreason.h"
+
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+#include <linux/input/synaptics_dsx.h>
+#define TM_SAMPLE1 (1) // 2D only
+#define TM_SAMPLE2 (2) // 2D + 0D x 2
+#define TM_SAMPLE3 (3) // 2D + 0D x 4
+#define SYNAPTICS_MODULE TM_SAMPLE1
+#endif
+
+#define PANDA_RAMCONSOLE_START (PLAT_PHYS_OFFSET + SZ_512M)
+#define PANDA_RAMCONSOLE_SIZE SZ_2M
+
+#define GPIO_HUB_POWER 1
+#define GPIO_HUB_NRESET 62
+#define GPIO_WIFI_PMENA 43
+#define GPIO_WIFI_IRQ 53
+#define HDMI_GPIO_CT_CP_HPD 60
+#define HDMI_GPIO_HPD 63 /* Hot plug pin for HDMI */
+#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define TPS62361_GPIO 7 /* VCORE1 power control */
+#define PANDA_BT_GPIO 46
+
+
+#define PHYS_ADDR_SMC_SIZE (SZ_1M * 3)
+#define PHYS_ADDR_SMC_MEM (0x80000000 + SZ_1G - PHYS_ADDR_SMC_SIZE)
+#define OMAP_ION_HEAP_SECURE_INPUT_SIZE (SZ_1M * 90)
+#define PHYS_ADDR_DUCATI_SIZE (SZ_1M * 105)
+#define PHYS_ADDR_DUCATI_MEM (PHYS_ADDR_SMC_MEM - PHYS_ADDR_DUCATI_SIZE - \
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE)
+
+#define WILINK_UART_DEV_NAME "/dev/ttyO1"
+
+
+/* Synaptics changes for PandaBoard */
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4
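+/*
+ * Request the touch controller's attention (interrupt) GPIO and configure
+ * it as an input.
+ */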
+static int synaptics_gpio_setup(unsigned gpio, bool configure)
+{
+ int retval = 0;
+
+ if (configure) {
+ retval = gpio_request(gpio, "rmi4_attn");
+ if (retval) {
+			pr_err("%s: Failed to get attn gpio %d (code: %d)\n",
+ __func__, gpio, retval);
+ return retval;
+ }
+ omap_mux_init_signal("gpmc_ad15.gpio_39", OMAP_PIN_INPUT_PULLUP);
+
+ retval = gpio_direction_input(gpio);
+ if (retval) {
+			pr_err("%s: Failed to setup attn gpio %d (code: %d)\n",
+ __func__, gpio, retval);
+ gpio_free(gpio);
+ }
+ } else {
+		pr_warn("%s: No way to deconfigure gpio %d\n",
+ __func__, gpio);
+ }
+
+ return retval;
+}
+
+#if (SYNAPTICS_MODULE == TM_SAMPLE1)
+#define TM_SAMPLE1_ADDR 0x20
+#define TM_SAMPLE1_ATTN 130
+
+static unsigned char TM_SAMPLE1_f1a_button_codes[] = {};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE1_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE1_f1a_button_codes),
+ .map = TM_SAMPLE1_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE1_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE1_capacitance_button_map,
+};
+
+static struct i2c_board_info bus4_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE1_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+
+#elif (SYNAPTICS_MODULE == TM_SAMPLE2)
+#define TM_SAMPLE2_ADDR 0x20
+#define TM_SAMPLE2_ATTN 130
+
+static unsigned char TM_SAMPLE2_f1a_button_codes[] = {KEY_MENU, KEY_BACK};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE2_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE2_f1a_button_codes),
+ .map = TM_SAMPLE2_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE2_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE2_capacitance_button_map,
+};
+
+static struct i2c_board_info bus4_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE2_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+
+#elif (SYNAPTICS_MODULE == TM_SAMPLE3)
+#define TM_SAMPLE3_ADDR 0x20
+#define TM_SAMPLE3_ATTN 130
+
+static unsigned char TM_SAMPLE3_f1a_button_codes[] = {KEY_MENU, KEY_HOME, KEY_BACK, KEY_SEARCH};
+
+static struct synaptics_rmi4_capacitance_button_map TM_SAMPLE3_capacitance_button_map = {
+ .nbuttons = ARRAY_SIZE(TM_SAMPLE3_f1a_button_codes),
+ .map = TM_SAMPLE3_f1a_button_codes,
+};
+
+static struct synaptics_rmi4_platform_data rmi4_platformdata = {
+ .irq_flags = IRQF_TRIGGER_FALLING,
+ .irq_gpio = TM_SAMPLE3_ATTN,
+ .gpio_config = synaptics_gpio_setup,
+ .capacitance_button_map = &TM_SAMPLE3_capacitance_button_map,
+};
+
+static struct i2c_board_info bus4_i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("synaptics_rmi4_i2c", TM_SAMPLE3_ADDR),
+ .platform_data = &rmi4_platformdata,
+ },
+};
+#endif
+
+void __init i2c_device_setup(void)
+{
+	pr_info(">>>>I2C device setup\n");
+ if (ARRAY_SIZE(bus4_i2c_devices)) {
+ i2c_register_board_info(4, bus4_i2c_devices,
+ ARRAY_SIZE(bus4_i2c_devices));
+ }
+}
+#endif
+/* End of Synaptics changes for PandaBoard */
+
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "pandaboard::status1",
+ .default_trigger = "heartbeat",
+ .gpio = 7,
+ },
+ {
+ .name = "pandaboard::status2",
+ .default_trigger = "mmc0",
+ .gpio = 8,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ },
+};
+
+/* GPIO_KEY for the panda */
+static struct gpio_keys_button panda_gpio_keys_buttons[] = {
+ [0] = {
+ .code = KEY_HOME,
+ .gpio = 113,
+ .desc = "user_button",
+ .active_low = 1,
+ .debounce_interval = 5,
+ },
+};
+
+static struct gpio_keys_platform_data panda_gpio_keys = {
+ .buttons = panda_gpio_keys_buttons,
+ .nbuttons = ARRAY_SIZE(panda_gpio_keys_buttons),
+ .rep = 0,
+};
+
+static struct platform_device panda_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &panda_gpio_keys,
+ },
+};
+
+/* TODO: handle suspend/resume here.
+ * Upon every suspend, make sure the wilink chip is
+ * still able to wake up the OMAP host.
+ */
+static int plat_wlink_kim_suspend(struct platform_device *pdev, pm_message_t
+ state)
+{
+ return 0;
+}
+
+static int plat_wlink_kim_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+/* wl128x BT, FM, GPS connectivity chip */
+static struct ti_st_plat_data wilink_pdata = {
+ .nshutdown_gpio = PANDA_BT_GPIO,
+ .dev_name = WILINK_UART_DEV_NAME,
+ .flow_cntrl = 1,
+ .baud_rate = 3686400,
+ .suspend = plat_wlink_kim_suspend,
+ .resume = plat_wlink_kim_resume,
+};
+
+static struct platform_device btwilink_device = {
+ .name = "btwilink",
+ .id = -1,
+};
+
+/* wl127x BT, FM, GPS connectivity chip */
+static struct platform_device wl1271_device = {
+ .name = "kim",
+ .id = -1,
+ .dev.platform_data = &wilink_pdata,
+};
+
+
+static struct platform_device *panda_devices[] __initdata = {
+ &leds_gpio,
+ &wl1271_device,
+ &btwilink_device,
+ &panda_gpio_keys_device,
+};
+
+static void __init omap4_panda_init_early(void)
+{
+ omap2_init_common_infrastructure();
+ omap2_init_common_devices(NULL, NULL);
+}
+
+static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
+ .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
+ .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
+ .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
+ .phy_reset = false,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = -EINVAL,
+ .reset_gpio_port[2] = -EINVAL
+};
+
+static struct gpio panda_ehci_gpios[] __initdata = {
+ { GPIO_HUB_POWER, GPIOF_OUT_INIT_LOW, "hub_power" },
+ { GPIO_HUB_NRESET, GPIOF_OUT_INIT_LOW, "hub_nreset" },
+};
+
+static void __init omap4_ehci_init(void)
+{
+ int ret;
+ struct clk *phy_ref_clk;
+
+ /* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
+ phy_ref_clk = clk_get(NULL, "auxclk3_ck");
+ if (IS_ERR(phy_ref_clk)) {
+ pr_err("Cannot request auxclk3\n");
+ return;
+ }
+ clk_set_rate(phy_ref_clk, 19200000);
+ clk_enable(phy_ref_clk);
+
+ /* disable the power to the usb hub prior to init and reset phy+hub */
+ ret = gpio_request_array(panda_ehci_gpios,
+ ARRAY_SIZE(panda_ehci_gpios));
+ if (ret) {
+ pr_err("Unable to initialize EHCI power/reset\n");
+ return;
+ }
+
+ gpio_export(GPIO_HUB_POWER, 0);
+ gpio_export(GPIO_HUB_NRESET, 0);
+ gpio_set_value(GPIO_HUB_NRESET, 1);
+
+ usbhs_init(&usbhs_bdata);
+
+ /* enable power to hub */
+ gpio_set_value(GPIO_HUB_POWER, 1);
+}
+
+static struct omap_musb_board_data musb_board_data = {
+ .interface_type = MUSB_INTERFACE_UTMI,
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ .mode = MUSB_PERIPHERAL,
+#else
+ .mode = MUSB_OTG,
+#endif
+ .power = 100,
+};
+
+static struct twl4030_usb_data omap4_usbphy_data = {
+ .phy_init = omap4430_phy_init,
+ .phy_exit = omap4430_phy_exit,
+ .phy_power = omap4430_phy_power,
+ .phy_set_clock = omap4430_phy_set_clk,
+ .phy_suspend = omap4430_phy_suspend,
+};
+
+static struct omap2_hsmmc_info mmc[] = {
+ {
+ .mmc = 1,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .gpio_wp = -EINVAL,
+ .gpio_cd = -EINVAL,
+ },
+ {
+ .name = "wl1271",
+ .mmc = 5,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
+ .gpio_wp = -EINVAL,
+ .gpio_cd = -EINVAL,
+ .ocr_mask = MMC_VDD_165_195,
+ .nonremovable = true,
+ },
+ {} /* Terminator */
+};
+
+static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = {
+ {
+ .supply = "vmmc",
+ .dev_name = "omap_hsmmc.0",
+ },
+};
+
+static struct regulator_consumer_supply omap4_panda_vmmc5_supply = {
+ .supply = "vmmc",
+ .dev_name = "omap_hsmmc.4",
+};
+
+static struct regulator_init_data panda_vmmc5 = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &omap4_panda_vmmc5_supply,
+};
+
+static struct fixed_voltage_config panda_vwlan = {
+ .supply_name = "vwl1271",
+ .microvolts = 1800000, /* 1.8V */
+ .gpio = GPIO_WIFI_PMENA,
+ .startup_delay = 70000, /* 70msec */
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &panda_vmmc5,
+};
+
+static struct platform_device omap_vwlan_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &panda_vwlan,
+ },
+};
+
+struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
+ .irq = OMAP_GPIO_IRQ(GPIO_WIFI_IRQ),
+ /* PANDA ref clock is 38.4 MHz */
+ .board_ref_clock = 2,
+};
+
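+/* Per-controller late init hook; enables TWL6030 card detect handling on MMC1. */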
+static int omap4_twl6030_hsmmc_late_init(struct device *dev)
+{
+ int ret = 0;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct omap_mmc_platform_data *pdata = dev->platform_data;
+
+ if (!pdata) {
+ dev_err(dev, "%s: NULL platform data\n", __func__);
+ return -EINVAL;
+ }
+	/* Set up the MMC1 card-detect IRQ */
+ if (pdev->id == 0) {
+ ret = twl6030_mmc_card_detect_config();
+ if (ret)
+ dev_err(dev, "%s: Error card detect config(%d)\n",
+ __func__, ret);
+ else
+ pdata->slots[0].card_detect = twl6030_mmc_card_detect;
+ }
+ return ret;
+}
+
+static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
+{
+ struct omap_mmc_platform_data *pdata;
+
+ /* dev can be null if CONFIG_MMC_OMAP_HS is not set */
+ if (!dev) {
+ pr_err("Failed omap4_twl6030_hsmmc_set_late_init\n");
+ return;
+ }
+ pdata = dev->platform_data;
+
+ pdata->init = omap4_twl6030_hsmmc_late_init;
+}
+
+static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
+{
+ struct omap2_hsmmc_info *c;
+
+ omap2_hsmmc_init(controllers);
+ for (c = controllers; c->mmc; c++)
+ omap4_twl6030_hsmmc_set_late_init(c->dev);
+
+ return 0;
+}
+
+static struct regulator_init_data omap4_panda_vaux2 = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 2800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_panda_vaux3 = {
+ .constraints = {
+ .min_uV = 1000000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+/* VMMC1 for MMC1 card */
+static struct regulator_init_data omap4_panda_vmmc = {
+ .constraints = {
+ .min_uV = 1200000,
+ .max_uV = 3000000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = omap4_panda_vmmc_supply,
+};
+
+static struct regulator_init_data omap4_panda_vpp = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 2500000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_panda_vana = {
+ .constraints = {
+ .min_uV = 2100000,
+ .max_uV = 2100000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_panda_vcxio = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_consumer_supply panda_vdac_supply[] = {
+ {
+ .supply = "hdmi_vref",
+ },
+};
+
+static struct regulator_init_data omap4_panda_vdac = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(panda_vdac_supply),
+ .consumer_supplies = panda_vdac_supply,
+};
+
+static struct regulator_init_data omap4_panda_vusb = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct regulator_init_data omap4_panda_clk32kg = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ .always_on = true,
+ },
+};
+
+static void omap4_audio_conf(void)
+{
+ /* twl6040 naudint */
+	omap_mux_init_signal("sys_nirq2.sys_nirq2",
+ OMAP_PIN_INPUT_PULLUP);
+}
+
+static struct twl4030_codec_audio_data twl6040_audio = {
+ /* single-step ramp for headset and handsfree */
+ .hs_left_step = 0x0f,
+ .hs_right_step = 0x0f,
+ .hf_left_step = 0x1d,
+ .hf_right_step = 0x1d,
+ .hs_switch_dev = 0x1,
+ .hs_forced_hs_state = 0x1
+};
+
+static struct twl4030_codec_data twl6040_codec = {
+ .audio = &twl6040_audio,
+ .audpwron_gpio = 127,
+ .naudint_irq = OMAP44XX_IRQ_SYS_2N,
+ .irq_base = TWL6040_CODEC_IRQ_BASE,
+};
+
+static struct twl4030_platform_data omap4_panda_twldata = {
+ .irq_base = TWL6030_IRQ_BASE,
+ .irq_end = TWL6030_IRQ_END,
+
+ /* Regulators */
+ .vmmc = &omap4_panda_vmmc,
+ .vpp = &omap4_panda_vpp,
+ .vana = &omap4_panda_vana,
+ .vcxio = &omap4_panda_vcxio,
+ .vdac = &omap4_panda_vdac,
+ .vusb = &omap4_panda_vusb,
+ .vaux2 = &omap4_panda_vaux2,
+ .vaux3 = &omap4_panda_vaux3,
+ .clk32kg = &omap4_panda_clk32kg,
+ .usb = &omap4_usbphy_data,
+
+ /* children */
+ .codec = &twl6040_codec,
+};
+
+/*
+ * The display monitor's capabilities are stored in its EEPROM as EDID data.
+ * The EEPROM is connected as an I2C slave device and can be accessed at
+ * address 0x50.
+ */
+static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
+ {
+ I2C_BOARD_INFO("eeprom", 0x50),
+ },
+};
+
+static int __init omap4_panda_i2c_init(void)
+{
+ omap4_pmic_init("twl6030", &omap4_panda_twldata);
+ omap_register_i2c_bus(2, 400, NULL, 0);
+ /*
+ * Bus 3 is attached to the DVI port where devices like the pico DLP
+ * projector don't work reliably with 400kHz
+ */
+ omap_register_i2c_bus(3, 100, panda_i2c_eeprom,
+ ARRAY_SIZE(panda_i2c_eeprom));
+	if (ARRAY_SIZE(bus4_i2c_devices))
+		omap_register_i2c_bus(4, 400, bus4_i2c_devices,
+				ARRAY_SIZE(bus4_i2c_devices));
+ else
+ omap_register_i2c_bus(4, 400, NULL, 0);
+ return 0;
+}
+
+#ifdef CONFIG_OMAP_MUX
+static struct omap_board_mux board_mux[] __initdata = {
+ /* WLAN IRQ - GPIO 53 */
+ OMAP4_MUX(GPMC_NCS3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
+ /* WLAN POWER ENABLE - GPIO 43 */
+ OMAP4_MUX(GPMC_A19, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
+ /* WLAN SDIO: MMC5 CMD */
+ OMAP4_MUX(SDMMC5_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ /* WLAN SDIO: MMC5 CLK */
+ OMAP4_MUX(SDMMC5_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ /* WLAN SDIO: MMC5 DAT[0-3] */
+ OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
+ /* gpio 0 - TFP410 PD */
+ OMAP4_MUX(KPD_COL1, OMAP_PIN_OUTPUT | OMAP_MUX_MODE3),
+ /* dispc2_data23 */
+ OMAP4_MUX(USBB2_ULPITLL_STP, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data22 */
+ OMAP4_MUX(USBB2_ULPITLL_DIR, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data21 */
+ OMAP4_MUX(USBB2_ULPITLL_NXT, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data20 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT0, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data19 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT1, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data18 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT2, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data15 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data14 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data13 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data12 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data11 */
+ OMAP4_MUX(USBB2_ULPITLL_DAT7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data10 */
+ OMAP4_MUX(DPM_EMU3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data9 */
+ OMAP4_MUX(DPM_EMU4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data16 */
+ OMAP4_MUX(DPM_EMU5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data17 */
+ OMAP4_MUX(DPM_EMU6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_hsync */
+ OMAP4_MUX(DPM_EMU7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_pclk */
+ OMAP4_MUX(DPM_EMU8, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_vsync */
+ OMAP4_MUX(DPM_EMU9, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_de */
+ OMAP4_MUX(DPM_EMU10, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data8 */
+ OMAP4_MUX(DPM_EMU11, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data7 */
+ OMAP4_MUX(DPM_EMU12, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data6 */
+ OMAP4_MUX(DPM_EMU13, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data5 */
+ OMAP4_MUX(DPM_EMU14, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data4 */
+ OMAP4_MUX(DPM_EMU15, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data3 */
+ OMAP4_MUX(DPM_EMU16, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data2 */
+ OMAP4_MUX(DPM_EMU17, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data1 */
+ OMAP4_MUX(DPM_EMU18, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* dispc2_data0 */
+ OMAP4_MUX(DPM_EMU19, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ { .reg_offset = OMAP_MUX_TERMINATOR },
+};
+
+static inline void __init board_serial_init(void)
+{
+ omap_serial_init();
+}
+#else
+#define board_mux NULL
+
+static inline void __init board_serial_init(void)
+{
+ omap_serial_init();
+}
+#endif
+
+/* Display DVI */
+#define PANDA_DVI_TFP410_POWER_DOWN_GPIO 0
+
+static int omap4_panda_enable_dvi(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 1);
+ return 0;
+}
+
+static void omap4_panda_disable_dvi(struct omap_dss_device *dssdev)
+{
+ gpio_set_value(dssdev->reset_gpio, 0);
+}
+
+/* Using generic display panel */
+static struct panel_generic_dpi_data omap4_dvi_panel = {
+ .name = "generic_720p",
+ .platform_enable = omap4_panda_enable_dvi,
+ .platform_disable = omap4_panda_disable_dvi,
+};
+
+struct omap_dss_device omap4_panda_dvi_device = {
+ .type = OMAP_DISPLAY_TYPE_DPI,
+ .name = "dvi",
+ .driver_name = "generic_dpi_panel",
+ .data = &omap4_dvi_panel,
+ .phy.dpi.data_lines = 24,
+ .reset_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
+ .channel = OMAP_DSS_CHANNEL_LCD2,
+};
+
+int __init omap4_panda_dvi_init(void)
+{
+ int r;
+
+	/* Request the TFP410 DVI power-down GPIO and keep the display disabled at boot */
+ r = gpio_request_one(omap4_panda_dvi_device.reset_gpio,
+ GPIOF_OUT_INIT_LOW, "DVI PD");
+ if (r)
+ pr_err("Failed to get DVI powerdown GPIO\n");
+
+ return r;
+}
+
+static struct gpio panda_hdmi_gpios[] = {
+ { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" },
+ { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
+};
+
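+/*
+ * Mux the HDMI HPD/CEC/DDC pads and claim the hot-plug detect and
+ * level-shifter GPIOs.
+ */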
+static void omap4_panda_hdmi_mux_init(void)
+{
+ u32 r;
+ int status;
+ /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
+ omap_mux_init_signal("hdmi_hpd.hdmi_hpd",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("gpmc_wait2.gpio_100",
+ OMAP_PIN_INPUT_PULLDOWN);
+ omap_mux_init_signal("hdmi_cec.hdmi_cec",
+ OMAP_PIN_INPUT_PULLUP);
+ /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
+ omap_mux_init_signal("hdmi_ddc_scl.hdmi_ddc_scl",
+ OMAP_PIN_INPUT_PULLUP);
+ omap_mux_init_signal("hdmi_ddc_sda.hdmi_ddc_sda",
+ OMAP_PIN_INPUT_PULLUP);
+
+ /* strong pullup on DDC lines using unpublished register */
+ r = ((1 << 24) | (1 << 28)) ;
+ omap4_ctrl_pad_writel(r, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_I2C_1);
+
+ gpio_request(HDMI_GPIO_HPD, NULL);
+ omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT | OMAP_PULL_ENA);
+ gpio_direction_input(HDMI_GPIO_HPD);
+
+ status = gpio_request_array(panda_hdmi_gpios,
+ ARRAY_SIZE(panda_hdmi_gpios));
+ if (status)
+		pr_err("%s: Cannot request HDMI GPIOs %x\n", __func__, status);
+}
+
+static struct omap_dss_device omap4_panda_hdmi_device = {
+ .name = "hdmi",
+ .driver_name = "hdmi_panel",
+ .type = OMAP_DISPLAY_TYPE_HDMI,
+ .clocks = {
+ .dispc = {
+ .dispc_fclk_src = OMAP_DSS_CLK_SRC_FCK,
+ },
+ .hdmi = {
+ .regn = 15,
+ .regm2 = 1,
+ },
+ },
+ .hpd_gpio = HDMI_GPIO_HPD,
+ .channel = OMAP_DSS_CHANNEL_DIGIT,
+};
+
+static struct omap_dss_device *omap4_panda_dss_devices[] = {
+ &omap4_panda_dvi_device,
+ &omap4_panda_hdmi_device,
+};
+
+static struct omap_dss_board_info omap4_panda_dss_data = {
+ .num_devices = ARRAY_SIZE(omap4_panda_dss_devices),
+ .devices = omap4_panda_dss_devices,
+ .default_device = &omap4_panda_dvi_device,
+};
+
+/*
+ * LPDDR2 Configuration Data:
+ * The memory organisation is as below :
+ * EMIF1 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * EMIF2 - CS0 - 2 Gb
+ * CS1 - 2 Gb
+ * --------------------
+ * TOTAL - 8 Gb
+ *
+ * Same devices installed on EMIF1 and EMIF2
+ */
+static __initdata struct emif_device_details emif_devices = {
+ .cs0_device = &lpddr2_elpida_2G_S4_dev,
+ .cs1_device = &lpddr2_elpida_2G_S4_dev
+};
+
+void omap4_panda_display_init(void)
+{
+ int r;
+
+ r = omap4_panda_dvi_init();
+ if (r)
+ pr_err("error initializing panda DVI\n");
+
+ omap4_panda_hdmi_mux_init();
+ omap_display_init(&omap4_panda_dss_data);
+}
+
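+/*
+ * Reboot notifier: for "bootloader" and "recovery" restarts, save the reboot
+ * reason in SAR scratch memory and request a warm software reset; otherwise
+ * fall back to a cold reset.
+ */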
+static int panda_notifier_call(struct notifier_block *this,
+ unsigned long code, void *cmd)
+{
+ void __iomem *sar_base;
+ u32 v = OMAP4430_RST_GLOBAL_COLD_SW_MASK;
+
+ sar_base = omap4_get_sar_ram_base();
+
+ if (!sar_base)
+ return notifier_from_errno(-ENOMEM);
+
+ if ((code == SYS_RESTART) && (cmd != NULL)) {
+ /* cmd != null; case: warm boot */
+ if (!strcmp(cmd, "bootloader")) {
+ /* Save reboot mode in scratch memory */
+ strcpy(sar_base + 0xA0C, cmd);
+ v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
+ } else if (!strcmp(cmd, "recovery")) {
+ /* Save reboot mode in scratch memory */
+ strcpy(sar_base + 0xA0C, cmd);
+ v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
+ } else {
+ v |= OMAP4430_RST_GLOBAL_COLD_SW_MASK;
+ }
+ }
+
+ omap4_prm_write_inst_reg(0xfff, OMAP4430_PRM_DEVICE_INST,
+ OMAP4_RM_RSTST);
+ omap4_prm_write_inst_reg(v, OMAP4430_PRM_DEVICE_INST, OMAP4_RM_RSTCTRL);
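+	/* Read RSTCTRL back, presumably to ensure the reset write has posted. */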
+ v = omap4_prm_read_inst_reg(WKUP_MOD, OMAP4_RM_RSTCTRL);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block panda_reboot_notifier = {
+ .notifier_call = panda_notifier_call,
+};
+
+#define PANDA_FB_RAM_SIZE	SZ_16M /* 1920 * 1080 * 4 * 2 */
+static struct omapfb_platform_data panda_fb_pdata = {
+ .mem_desc = {
+ .region_cnt = 1,
+ .region = {
+ [0] = {
+ .size = PANDA_FB_RAM_SIZE,
+ },
+ },
+ },
+};
+
+static struct resource ramconsole_resources[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ .start = PANDA_RAMCONSOLE_START,
+ .end = PANDA_RAMCONSOLE_START + PANDA_RAMCONSOLE_SIZE - 1,
+ },
+};
+
+static struct ram_console_platform_data ramconsole_pdata;
+
+static struct platform_device ramconsole_device = {
+ .name = "ram_console",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ramconsole_resources),
+ .resource = ramconsole_resources,
+ .dev = {
+ .platform_data = &ramconsole_pdata,
+ },
+};
+
+extern void __init omap4_panda_android_init(void);
+
+static void __init omap4_panda_init(void)
+{
+ int package = OMAP_PACKAGE_CBS;
+ int status;
+
+ omap_emif_setup_device_details(&emif_devices, &emif_devices);
+
+ if (omap_rev() == OMAP4430_REV_ES1_0)
+ package = OMAP_PACKAGE_CBL;
+ omap4_mux_init(board_mux, NULL, package);
+
+ if (wl12xx_set_platform_data(&omap_panda_wlan_data))
+ pr_err("error setting wl12xx data\n");
+
+ register_reboot_notifier(&panda_reboot_notifier);
+ ramconsole_pdata.bootinfo = omap4_get_resetreason();
+ platform_device_register(&ramconsole_device);
+ omap4_panda_i2c_init();
+ omap4_audio_conf();
+
+ if (cpu_is_omap4430())
+ panda_gpio_keys_buttons[0].gpio = 121;
+
+ platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
+ platform_device_register(&omap_vwlan_device);
+ board_serial_init();
+ omap4_twl6030_hsmmc_init(mmc);
+ omap4_ehci_init();
+ usb_musb_init(&musb_board_data);
+
+ omap_dmm_init();
+ omap_vram_set_sdram_vram(PANDA_FB_RAM_SIZE, 0);
+ omapfb_set_platform_data(&panda_fb_pdata);
+ omap4_panda_display_init();
+
+ if (cpu_is_omap446x()) {
+ /* Vsel0 = gpio, vsel1 = gnd */
+ status = omap_tps6236x_board_setup(true, TPS62361_GPIO, -1,
+ OMAP_PIN_OFF_OUTPUT_HIGH, -1);
+ if (status)
+ pr_err("TPS62361 initialization failed: %d\n", status);
+ }
+ omap_enable_smartreflex_on_init();
+}
+
+static void __init omap4_panda_map_io(void)
+{
+ omap2_set_globals_443x();
+ omap44xx_map_common_io();
+}
+
+static void __init omap4_panda_reserve(void)
+{
+ /* do the static reservations first */
+ memblock_remove(PANDA_RAMCONSOLE_START, PANDA_RAMCONSOLE_SIZE);
+ memblock_remove(PHYS_ADDR_SMC_MEM, PHYS_ADDR_SMC_SIZE);
+ memblock_remove(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE);
+ /* ipu needs to recognize secure input buffer area as well */
+ omap_ipu_set_static_mempool(PHYS_ADDR_DUCATI_MEM, PHYS_ADDR_DUCATI_SIZE +
+ OMAP_ION_HEAP_SECURE_INPUT_SIZE);
+
+ omap_reserve();
+}
+
+MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
+ /* Maintainer: David Anders - Texas Instruments Inc */
+ .boot_params = 0x80000100,
+ .reserve = omap4_panda_reserve,
+ .map_io = omap4_panda_map_io,
+ .init_early = omap4_panda_init_early,
+ .init_irq = gic_init_irq,
+ .init_machine = omap4_panda_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f0bc36879e1d..e8d71110ed2a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2006,7 +2006,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
{
struct super_block *pinned_sb = NULL;
struct cgroup_subsys *ss;
- struct cgroup_root *root;
+ struct cgroup_root *root = NULL;
struct cgroup_sb_opts opts;
struct dentry *dentry;
int ret;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 37731292f8a1..1cfd381642da 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -365,21 +365,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
goto out_release;
}
- /*
- * By now we've cleared cpu_active_mask, wait for all preempt-disabled
- * and RCU users of this state to go away such that all new such users
- * will observe it.
- *
- * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
- * not imply sync_sched(), so wait for both.
- *
- * Do sync before park smpboot threads to take care the rcu boost case.
- */
- if (IS_ENABLED(CONFIG_PREEMPT))
- synchronize_rcu_mult(call_rcu, call_rcu_sched);
- else
- synchronize_rcu();
-
smpboot_park_threads(cpu);
/*
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 009cc9a17d95..774bfe7a2893 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -22,14 +22,17 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
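+
+/* True while the syscore (system suspend) path is driving the CPU PM notifiers. */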
+bool from_suspend = false;
+
static DEFINE_RWLOCK(cpu_pm_notifier_lock);
static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls,
+ void *data)
{
int ret;
- ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+ ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, data,
nr_to_call, nr_calls);
return notifier_to_errno(ret);
@@ -101,13 +104,13 @@ int cpu_pm_enter(void)
int ret = 0;
read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+ ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls, NULL);
if (ret)
/*
* Inform listeners (nr_calls - 1) about failure of CPU PM
* PM entry who are notified earlier to prepare for it.
*/
- cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL, NULL);
read_unlock(&cpu_pm_notifier_lock);
return ret;
@@ -131,7 +134,7 @@ int cpu_pm_exit(void)
int ret;
read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+ ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL, NULL);
read_unlock(&cpu_pm_notifier_lock);
return ret;
@@ -154,19 +157,21 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
*
* Return conditions are same as __raw_notifier_call_chain.
*/
-int cpu_cluster_pm_enter(void)
+int cpu_cluster_pm_enter(unsigned long aff_level)
{
int nr_calls;
int ret = 0;
read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls,
+ (void *) aff_level);
if (ret)
/*
* Inform listeners (nr_calls - 1) about failure of CPU cluster
* PM entry who are notified earlier to prepare for it.
*/
- cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+ cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL,
+ (void *) aff_level);
read_unlock(&cpu_pm_notifier_lock);
return ret;
@@ -188,12 +193,12 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
*
* Return conditions are same as __raw_notifier_call_chain.
*/
-int cpu_cluster_pm_exit(void)
+int cpu_cluster_pm_exit(unsigned long aff_level)
{
int ret;
read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+ ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL, (void *) aff_level);
read_unlock(&cpu_pm_notifier_lock);
return ret;
@@ -205,17 +210,19 @@ static int cpu_pm_suspend(void)
{
int ret;
+ from_suspend = true;
ret = cpu_pm_enter();
if (ret)
return ret;
- ret = cpu_cluster_pm_enter();
+ ret = cpu_cluster_pm_enter(0);
return ret;
}
static void cpu_pm_resume(void)
{
- cpu_cluster_pm_exit();
+ from_suspend = false;
+ cpu_cluster_pm_exit(0);
cpu_pm_exit();
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2ade632197d5..69d25607d41b 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -99,6 +99,7 @@ struct cpuset {
/* user-configured CPUs and Memory Nodes allow to tasks */
cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;	/* CPUs requested by the user; may include CPUs offlined by hotplug */
nodemask_t mems_allowed;
/* effective CPUs and Memory Nodes allow to tasks */
@@ -387,7 +388,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
- return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+ return cpumask_subset(p->cpus_requested, q->cpus_requested) &&
nodes_subset(p->mems_allowed, q->mems_allowed) &&
is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -487,7 +488,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
cpuset_for_each_child(c, css, par) {
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
- cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+ cpumask_intersects(trial->cpus_requested, c->cpus_requested))
goto out;
if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
c != cur &&
@@ -946,17 +947,18 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (!*buf) {
cpumask_clear(trialcs->cpus_allowed);
} else {
- retval = cpulist_parse(buf, trialcs->cpus_allowed);
+ retval = cpulist_parse(buf, trialcs->cpus_requested);
if (retval < 0)
return retval;
- if (!cpumask_subset(trialcs->cpus_allowed,
- top_cpuset.cpus_allowed))
+ if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
return -EINVAL;
+
+ cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
}
/* Nothing to do if the cpus didn't change */
- if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+ if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
return 0;
retval = validate_change(cs, trialcs);
@@ -965,6 +967,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+ cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
spin_unlock_irq(&callback_lock);
/* use trialcs->cpus_allowed as a temp variable */
@@ -1755,7 +1758,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
switch (type) {
case FILE_CPULIST:
- seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+ seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
break;
case FILE_MEMLIST:
seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1943,12 +1946,15 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
if (!cs)
return ERR_PTR(-ENOMEM);
if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
- goto free_cs;
+ goto error_allowed;
if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
- goto free_cpus;
+ goto error_effective;
+ if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+ goto error_requested;
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpumask_clear(cs->cpus_allowed);
+ cpumask_clear(cs->cpus_requested);
nodes_clear(cs->mems_allowed);
cpumask_clear(cs->effective_cpus);
nodes_clear(cs->effective_mems);
@@ -1957,9 +1963,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
return &cs->css;
-free_cpus:
+error_requested:
+ free_cpumask_var(cs->effective_cpus);
+error_effective:
free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
kfree(cs);
return ERR_PTR(-ENOMEM);
}
@@ -2020,6 +2028,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+ cpumask_copy(cs->cpus_requested, parent->cpus_requested);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
@@ -2054,6 +2063,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
free_cpumask_var(cs->effective_cpus);
free_cpumask_var(cs->cpus_allowed);
+ free_cpumask_var(cs->cpus_requested);
kfree(cs);
}
@@ -2075,12 +2085,30 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
mutex_unlock(&cpuset_mutex);
}
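+
+/*
+ * Permit attaching a task to a cpuset when the caller is the task itself,
+ * has CAP_SYS_ADMIN, or its euid matches the target task's uid or suid.
+ */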
+static int cpuset_allow_attach(struct cgroup_taskset *tset)
+{
+ const struct cred *cred = current_cred(), *tcred;
+ struct task_struct *task;
+ struct cgroup_subsys_state *css;
+
+ cgroup_taskset_for_each(task, css, tset) {
+ tcred = __task_cred(task);
+
+ if ((current != task) && !capable(CAP_SYS_ADMIN) &&
+ cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val)
+ return -EACCES;
+ }
+
+ return 0;
+}
+
struct cgroup_subsys cpuset_cgrp_subsys = {
.css_alloc = cpuset_css_alloc,
.css_online = cpuset_css_online,
.css_offline = cpuset_css_offline,
.css_free = cpuset_css_free,
.can_attach = cpuset_can_attach,
+ .allow_attach = cpuset_allow_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
.bind = cpuset_bind,
@@ -2102,8 +2130,11 @@ int __init cpuset_init(void)
BUG();
if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
BUG();
+ if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+ BUG();
cpumask_setall(top_cpuset.cpus_allowed);
+ cpumask_setall(top_cpuset.cpus_requested);
nodes_setall(top_cpuset.mems_allowed);
cpumask_setall(top_cpuset.effective_cpus);
nodes_setall(top_cpuset.effective_mems);
@@ -2237,7 +2268,8 @@ retry:
goto retry;
}
- cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+ cpumask_and(&new_cpus, cs->cpus_requested,
+ parent_cs(cs)->effective_cpus);
nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
diff --git a/kernel/drivers/input/touchscreen/Kconfig b/kernel/drivers/input/touchscreen/Kconfig
new file mode 100644
index 000000000000..18655c0b3997
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/Kconfig
@@ -0,0 +1,721 @@
+#
+# Touchscreen driver configuration
+#
+menuconfig INPUT_TOUCHSCREEN
+ bool "Touchscreens"
+ help
+ Say Y here, and a list of supported touchscreens will be displayed.
+ This option doesn't affect the kernel.
+
+ If unsure, say Y.
+
+if INPUT_TOUCHSCREEN
+
+config TOUCHSCREEN_88PM860X
+ tristate "Marvell 88PM860x touchscreen"
+ depends on MFD_88PM860X
+ help
+ Say Y here if you have a 88PM860x PMIC and want to enable
+ support for the built-in touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called 88pm860x-ts.
+
+config TOUCHSCREEN_ADS7846
+ tristate "ADS7846/TSC2046/AD7873 and AD(S)7843 based touchscreens"
+ depends on SPI_MASTER
+ depends on HWMON = n || HWMON
+ help
+ Say Y here if you have a touchscreen interface using the
+ ADS7846/TSC2046/AD7873 or ADS7843/AD7843 controller,
+ and your board-specific setup code includes that in its
+ table of SPI devices.
+
+ If HWMON is selected, and the driver is told the reference voltage
+ on your board, you will also get hwmon interfaces for the voltage
+ (and on ads7846/tsc2046/ad7873, temperature) sensors of this chip.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ads7846.
+
+config TOUCHSCREEN_AD7877
+ tristate "AD7877 based touchscreens"
+ depends on SPI_MASTER
+ help
+ Say Y here if you have a touchscreen interface using the
+ AD7877 controller, and your board-specific initialization
+ code includes that in its table of SPI devices.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7877.
+
+config TOUCHSCREEN_AD7879
+ tristate "Analog Devices AD7879-1/AD7889-1 touchscreen interface"
+ help
+ Say Y here if you want to support a touchscreen interface using
+ the AD7879-1/AD7889-1 controller.
+
+ You should select a bus connection too.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7879.
+
+config TOUCHSCREEN_AD7879_I2C
+ tristate "support I2C bus connection"
+ depends on TOUCHSCREEN_AD7879 && I2C
+ help
+ Say Y here if you have AD7879-1/AD7889-1 hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7879-i2c.
+
+config TOUCHSCREEN_AD7879_SPI
+ tristate "support SPI bus connection"
+ depends on TOUCHSCREEN_AD7879 && SPI_MASTER
+ help
+ Say Y here if you have AD7879-1/AD7889-1 hooked to a SPI bus.
+
+ If unsure, say N (but it's safe to say "Y").
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7879-spi.
+
+config TOUCHSCREEN_BITSY
+ tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
+ depends on SA1100_BITSY
+ select SERIO
+ help
+ Say Y here if you have the h3600 (Bitsy) touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called h3600_ts_input.
+
+config TOUCHSCREEN_BU21013
+ tristate "BU21013 based touch panel controllers"
+ depends on I2C
+ help
+ Say Y here if you have a bu21013 touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bu21013_ts.
+
+config TOUCHSCREEN_CY8CTMG110
+ tristate "cy8ctmg110 touchscreen"
+ depends on I2C
+ depends on GPIOLIB
+
+ help
+ Say Y here if you have a cy8ctmg110 capacitive touchscreen on
+ an AAVA device.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cy8ctmg110_ts.
+
+config TOUCHSCREEN_DA9034
+ tristate "Touchscreen support for Dialog Semiconductor DA9034"
+ depends on PMIC_DA903X
+ default y
+ help
+ Say Y here to enable the support for the touchscreen found
+ on Dialog Semiconductor DA9034 PMIC.
+
+config TOUCHSCREEN_DYNAPRO
+ tristate "Dynapro serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Dynapro serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dynapro.
+
+config TOUCHSCREEN_HAMPSHIRE
+ tristate "Hampshire serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Hampshire serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hampshire.
+
+config TOUCHSCREEN_EETI
+ tristate "EETI touchscreen panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C connected EETI touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called eeti_ts.
+
+config TOUCHSCREEN_FUJITSU
+ tristate "Fujitsu serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have the Fujitsu touchscreen (such as one
+ installed in Lifebook P series laptop) connected to your
+ system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fujitsu-ts.
+
+config TOUCHSCREEN_S3C2410
+ tristate "Samsung S3C2410/generic touchscreen input driver"
+ depends on ARCH_S3C2410 || SAMSUNG_DEV_TS
+ select S3C_ADC
+ help
+ Say Y here if you have the s3c2410 touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called s3c2410_ts.
+
+config TOUCHSCREEN_GUNZE
+ tristate "Gunze AHL-51S touchscreen"
+ select SERIO
+ help
+ Say Y here if you have the Gunze AHL-51 touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gunze.
+
+config TOUCHSCREEN_ELO
+ tristate "Elo serial touchscreens"
+ select SERIO
+ help
+ Say Y here if you have an Elo serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called elo.
+
+config TOUCHSCREEN_WACOM_W8001
+ tristate "Wacom W8001 penabled serial touchscreen"
+ select SERIO
+ help
+	  Say Y here if you have a Wacom W8001 penabled serial touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wacom_w8001.
+
+config TOUCHSCREEN_LPC32XX
+ tristate "LPC32XX touchscreen controller"
+ depends on ARCH_LPC32XX
+ help
+	  Say Y here if you have an LPC32XX device and want
+ to support the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called lpc32xx_ts.
+
+config TOUCHSCREEN_MCS5000
+ tristate "MELFAS MCS-5000 touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have the MELFAS MCS-5000 touchscreen controller
+ chip in your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mcs5000_ts.
+
+config TOUCHSCREEN_MTOUCH
+ tristate "MicroTouch serial touchscreens"
+ select SERIO
+ help
+ Say Y here if you have a MicroTouch (3M) serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtouch.
+
+config TOUCHSCREEN_INEXIO
+ tristate "iNexio serial touchscreens"
+ select SERIO
+ help
+ Say Y here if you have an iNexio serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called inexio.
+
+config TOUCHSCREEN_INTEL_MID
+ tristate "Intel MID platform resistive touchscreen"
+ depends on INTEL_SCU_IPC
+ help
+	  Say Y here if you have an Intel MID based touchscreen in
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called intel_mid_touch.
+
+config TOUCHSCREEN_MK712
+ tristate "ICS MicroClock MK712 touchscreen"
+ help
+ Say Y here if you have the ICS MicroClock MK712 touchscreen
+ controller chip in your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mk712.
+
+config TOUCHSCREEN_HP600
+ tristate "HP Jornada 6xx touchscreen"
+ depends on SH_HP6XX && SH_ADC
+ help
+	  Say Y here if you have an HP Jornada 620/660/680/690 and want to
+ support the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hp680_ts_input.
+
+config TOUCHSCREEN_HP7XX
+ tristate "HP Jornada 7xx touchscreen"
+ depends on SA1100_JORNADA720_SSP
+ help
+	  Say Y here if you have an HP Jornada 710/720/728 and want
+ to support the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called jornada720_ts.
+
+config TOUCHSCREEN_HTCPEN
+ tristate "HTC Shift X9500 touchscreen"
+ depends on ISA
+ help
+ Say Y here if you have an HTC Shift UMPC also known as HTC X9500
+ Clio / Shangrila and want to support the built-in touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called htcpen.
+
+config TOUCHSCREEN_PENMOUNT
+ tristate "Penmount serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Penmount serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called penmount.
+
+config TOUCHSCREEN_QT602240
+ tristate "QT602240 I2C Touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have the AT42QT602240/ATMXT224 I2C touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qt602240_ts.
+
+config TOUCHSCREEN_MIGOR
+ tristate "Renesas MIGO-R touchscreen"
+ depends on SH_MIGOR && I2C
+ help
+ Say Y here to enable MIGO-R touchscreen support.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called migor_ts.
+
+config TOUCHSCREEN_TNETV107X
+ tristate "TI TNETV107X touchscreen support"
+ depends on ARCH_DAVINCI_TNETV107X
+ help
+ Say Y here if you want to use the TNETV107X touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tnetv107x-ts.
+
+config TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+ tristate "Synaptics DSX I2C touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Synaptics DSX I2C touchscreen
+ connected to your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called synaptics_i2c_rmi4.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_RMI4_DEV
+ tristate "Synaptics I2C touchscreen rmi device"
+ depends on TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+ help
+ This enables support for character device channel for Synaptics RMI
+ touchscreens.
+
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+ tristate "Synaptics I2C touchscreen firmware update"
+ depends on TOUCHSCREEN_SYNAPTICS_I2C_RMI4
+ help
+ This enables support for firmware update for Synaptics RMI
+ touchscreens.
+
+config TOUCHSCREEN_TOUCHRIGHT
+ tristate "Touchright serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Touchright serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called touchright.
+
+config TOUCHSCREEN_TOUCHWIN
+ tristate "Touchwin serial touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Touchwin serial touchscreen connected to
+ your system.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called touchwin.
+
+config TOUCHSCREEN_ATMEL_TSADCC
+ tristate "Atmel Touchscreen Interface"
+ depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+ help
+ Say Y here if you have a 4-wire touchscreen connected to the
+ ADC Controller on your Atmel SoC (such as the AT91SAM9RL).
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel_tsadcc.
+
+config TOUCHSCREEN_UCB1400
+ tristate "Philips UCB1400 touchscreen"
+ depends on AC97_BUS
+ depends on UCB1400_CORE
+ help
+ This enables support for the Philips UCB1400 touchscreen interface.
+ The UCB1400 is an AC97 audio codec. The touchscreen interface
+ will be initialized only after the ALSA subsystem has been
+ brought up and the UCB1400 detected. You therefore have to
+ configure ALSA support as well (either built-in or modular,
+ independently of whether this driver is itself built-in or
+ modular) for this driver to work.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ucb1400_ts.
+
+config TOUCHSCREEN_WM97XX
+ tristate "Support for WM97xx AC97 touchscreen controllers"
+ depends on AC97_BUS
+ help
+ Say Y here if you have a Wolfson Microelectronics WM97xx
+ touchscreen connected to your system. Note that this option
+	  only enables the core driver; you will also need to select
+	  support for the appropriate chip below.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wm97xx-ts.
+
+config TOUCHSCREEN_WM9705
+ bool "WM9705 Touchscreen interface support"
+ depends on TOUCHSCREEN_WM97XX
+ default y
+ help
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9705 touchscreen controller.
+
+config TOUCHSCREEN_WM9712
+ bool "WM9712 Touchscreen interface support"
+ depends on TOUCHSCREEN_WM97XX
+ default y
+ help
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9712 touchscreen controller.
+
+config TOUCHSCREEN_WM9713
+ bool "WM9713 Touchscreen interface support"
+ depends on TOUCHSCREEN_WM97XX
+ default y
+ help
+ Say Y here to enable support for the Wolfson Microelectronics
+ WM9713 touchscreen controller.
+
+config TOUCHSCREEN_WM97XX_ATMEL
+ tristate "WM97xx Atmel accelerated touch"
+ depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
+ help
+ Say Y here for support for streaming mode with WM97xx touchscreens
+ on Atmel AT91 or AVR32 systems with an AC97C module.
+
+ Be aware that this will use channel B in the controller for
+	  streaming data; this must not conflict with other AC97C drivers.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module will
+ be called atmel-wm97xx.
+
+config TOUCHSCREEN_WM97XX_MAINSTONE
+ tristate "WM97xx Mainstone/Palm accelerated touch"
+ depends on TOUCHSCREEN_WM97XX && ARCH_PXA
+ help
+ Say Y here for support for streaming mode with WM97xx touchscreens
+ on Mainstone, Palm Tungsten T5, TX and LifeDrive systems.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mainstone-wm97xx.
+
+config TOUCHSCREEN_WM97XX_ZYLONITE
+ tristate "Zylonite accelerated touch"
+ depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
+ select TOUCHSCREEN_WM9713
+ help
+ Say Y here for support for streaming mode with the touchscreen
+ on Zylonite systems.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called zylonite-wm97xx.
+
+config TOUCHSCREEN_USB_COMPOSITE
+ tristate "USB Touchscreen Driver"
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ USB Touchscreen driver for:
+ - eGalax Touchkit USB (also includes eTurboTouch CT-410/510/700)
+ - PanJit TouchSet USB
+ - 3M MicroTouch USB (EX II series)
+ - ITM
+ - some other eTurboTouch
+ - Gunze AHL61
+ - DMC TSC-10/25
+ - IRTOUCHSYSTEMS/UNITOP
+ - IdealTEK URTC1000
+ - GoTop Super_Q2/GogoPen/PenPower tablets
+ - JASTEC USB Touch Controller/DigiTech DTR-02U
+ - Zytronic controllers
+
+ Have a look at <http://linux.chapter7.ch/touchkit/> for
+ a usage description and the required user-space stuff.
+
+ To compile this driver as a module, choose M here: the
+ module will be called usbtouchscreen.
+
+config TOUCHSCREEN_MC13783
+ tristate "Freescale MC13783 touchscreen input driver"
+ depends on MFD_MC13783
+ help
+	  Say Y here if you have a Freescale MC13783 PMIC on your
+	  board and want to use its touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mc13783_ts.
+
+config TOUCHSCREEN_USB_EGALAX
+ default y
+ bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_PANJIT
+ default y
+ bool "PanJit device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_3M
+ default y
+ bool "3M/Microtouch EX II series device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_ITM
+ default y
+ bool "ITM device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_ETURBO
+ default y
+ bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_GUNZE
+ default y
+ bool "Gunze AHL61 device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_DMC_TSC10
+ default y
+ bool "DMC TSC-10/25 device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_IRTOUCH
+ default y
+ bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_IDEALTEK
+ default y
+ bool "IdealTEK URTC1000 device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_GENERAL_TOUCH
+ default y
+ bool "GeneralTouch Touchscreen device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_GOTOP
+ default y
+ bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_JASTEC
+ default y
+ bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_E2I
+ default y
+ bool "e2i Touchscreen controller (e.g. from Mimo 740)"
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_ZYTRONIC
+ default y
+ bool "Zytronic controller" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_ETT_TC45USB
+ default y
+	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_USB_NEXIO
+ default y
+ bool "NEXIO/iNexio device support" if EMBEDDED
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
+config TOUCHSCREEN_TOUCHIT213
+ tristate "Sahara TouchIT-213 touchscreen"
+ select SERIO
+ help
+ Say Y here if you have a Sahara TouchIT-213 Tablet PC.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called touchit213.
+
+config TOUCHSCREEN_TSC2007
+ tristate "TSC2007 based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TSC2007 based touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc2007.
+
+config TOUCHSCREEN_TSC2004
+ tristate "TSC2004 based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TSC2004 based touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tsc2004.
+
+config TOUCHSCREEN_W90X900
+ tristate "W90P910 touchscreen driver"
+ depends on HAVE_CLK
+ help
+ Say Y here if you have a W90P910 based touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called w90p910_ts.
+
+config TOUCHSCREEN_PCAP
+ tristate "Motorola PCAP touchscreen"
+ depends on EZX_PCAP
+ help
+ Say Y here if you have a Motorola EZX telephone and
+ want to enable support for the built-in touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcap_ts.
+
+config TOUCHSCREEN_TPS6507X
+ tristate "TPS6507x based touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a TPS6507x based touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps6507x_ts.
+
+config TOUCHSCREEN_STMPE
+ tristate "STMicroelectronics STMPE touchscreens"
+ depends on MFD_STMPE
+ help
+ Say Y here if you want support for STMicroelectronics
+ STMPE touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stmpe-ts.
+
+endif
diff --git a/kernel/drivers/input/touchscreen/Makefile b/kernel/drivers/input/touchscreen/Makefile
new file mode 100644
index 000000000000..a6c7d9f388a6
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/Makefile
@@ -0,0 +1,68 @@
+#
+# Makefile for the touchscreen drivers.
+#
+
+# Each configuration option enables a list of files.
+
+wm97xx-ts-y := wm97xx-core.o
+
+obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
+obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
+obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o
+obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
+obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
+obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
+obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
+obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
+obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o
+obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o
+obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
+obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
+obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
+obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
+obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
+obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
+obj-$(CONFIG_TOUCHSCREEN_LPC32XX) += lpc32xx_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MC13783) += mc13783_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MCS5000) += mcs5000_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MIGOR) += migor_ts.o
+obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
+obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o
+obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
+obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o
+obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
+obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
+obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
+obj-$(CONFIG_TOUCHSCREEN_QT602240) += qt602240_ts.o
+obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
+obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
+obj-$(CONFIG_TOUCHSCREEN_TNETV107X) += tnetv107x-ts.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += synaptics_i2c_rmi4.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI4_DEV) += synaptics_rmi_dev.o
+obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE) += synaptics_fw_update.o
+obj-$(CONFIG_TOUCHSCREEN_TOUCHIT213) += touchit213.o
+obj-$(CONFIG_TOUCHSCREEN_TOUCHRIGHT) += touchright.o
+obj-$(CONFIG_TOUCHSCREEN_TOUCHWIN) += touchwin.o
+obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
+obj-$(CONFIG_TOUCHSCREEN_TSC2004) += tsc2004.o
+obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
+obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX) += wm97xx-ts.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
+wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX_ATMEL) += atmel-wm97xx.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
+obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
+obj-$(CONFIG_TOUCHSCREEN_W90X900) += w90p910_ts.o
+obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
+
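+# Convenience targets for building these drivers out of tree against the
+# running kernel; they are not used by kbuild itself.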
+all:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
diff --git a/kernel/drivers/input/touchscreen/synaptics_fw_update.c b/kernel/drivers/input/touchscreen/synaptics_fw_update.c
new file mode 100644
index 000000000000..8b6d7c7e368d
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/synaptics_fw_update.c
@@ -0,0 +1,1698 @@
+/*
+ * Synaptics RMI4 touchscreen driver
+ *
+ * Copyright (C) 2012 Synaptics Incorporated
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/firmware.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_i2c_rmi4.h"
+
+#define DEBUG_FW_UPDATE
+#define SHOW_PROGRESS
+#define FW_IMAGE_NAME "PR1063486-s7301_00000000.img"
+#define MAX_FIRMWARE_ID_LEN 10
+#define FORCE_UPDATE false
+#define INSIDE_FIRMWARE_UPDATE
+
+#define CHECKSUM_OFFSET 0x00
+#define BOOTLOADER_VERSION_OFFSET 0x07
+#define IMAGE_SIZE_OFFSET 0x08
+#define CONFIG_SIZE_OFFSET 0x0C
+#define PRODUCT_ID_OFFSET 0x10
+#define PRODUCT_INFO_OFFSET 0x1E
+#define FW_IMAGE_OFFSET 0x100
+#define PRODUCT_ID_SIZE 10
+
+#define BOOTLOADER_ID_OFFSET 0
+#define FLASH_PROPERTIES_OFFSET 2
+#define BLOCK_SIZE_OFFSET 3
+#define FW_BLOCK_COUNT_OFFSET 5
+
+#define REG_MAP (1 << 0)
+#define UNLOCKED (1 << 1)
+#define HAS_CONFIG_ID (1 << 2)
+#define HAS_PERM_CONFIG (1 << 3)
+#define HAS_BL_CONFIG (1 << 4)
+#define HAS_DISP_CONFIG (1 << 5)
+#define HAS_CTRL1 (1 << 6)
+
+#define BLOCK_NUMBER_OFFSET 0
+#define BLOCK_DATA_OFFSET 2
+
+#define UI_CONFIG_AREA 0x00
+#define PERM_CONFIG_AREA 0x01
+#define BL_CONFIG_AREA 0x02
+#define DISP_CONFIG_AREA 0x03
+
+enum flash_command {
+ CMD_WRITE_FW_BLOCK = 0x2,
+ CMD_ERASE_ALL = 0x3,
+ CMD_READ_CONFIG_BLOCK = 0x5,
+ CMD_WRITE_CONFIG_BLOCK = 0x6,
+ CMD_ERASE_CONFIG = 0x7,
+ CMD_ERASE_BL_CONFIG = 0x9,
+ CMD_ERASE_DISP_CONFIG = 0xA,
+ CMD_ENABLE_FLASH_PROG = 0xF,
+};
+
+enum flash_area {
+ NONE,
+ UI_FIRMWARE,
+ CONFIG_AREA
+};
+
+#define SLEEP_MODE_NORMAL (0x00)
+#define SLEEP_MODE_SENSOR_SLEEP (0x01)
+#define SLEEP_MODE_RESERVED0 (0x02)
+#define SLEEP_MODE_RESERVED1 (0x03)
+
+#define ENABLE_WAIT_MS (1 * 1000)
+#define WRITE_WAIT_MS (3 * 1000)
+#define ERASE_WAIT_MS (5 * 1000)
+#define RESET_WAIT_MS (500)
+
+#define SLEEP_TIME_US 50
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count);
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static int fwu_wait_for_idle(int timeout_ms);
+
+struct image_header {
+ unsigned int checksum;
+ unsigned int image_size;
+ unsigned int config_size;
+ unsigned char options;
+ unsigned char bootloader_version;
+ unsigned char product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE + 1];
+ unsigned char product_info[SYNAPTICS_RMI4_PRODUCT_INFO_SIZE];
+};
+
+struct pdt_properties {
+ union {
+ struct {
+ unsigned char reserved_1:6;
+ unsigned char has_bsr:1;
+ unsigned char reserved_2:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f01_device_status {
+ union {
+ struct {
+ unsigned char status_code:4;
+ unsigned char reserved:2;
+ unsigned char flash_prog:1;
+ unsigned char unconfigured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f01_device_control {
+ union {
+ struct {
+ unsigned char sleep_mode:2;
+ unsigned char nosleep:1;
+ unsigned char reserved:2;
+ unsigned char charger_connected:1;
+ unsigned char report_rate:1;
+ unsigned char configured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_flash_control {
+ union {
+ struct {
+ unsigned char command:4;
+ unsigned char status:3;
+ unsigned char program_enabled:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct f34_flash_properties {
+ union {
+ struct {
+ unsigned char regmap:1;
+ unsigned char unlocked:1;
+ unsigned char has_configid:1;
+ unsigned char has_perm_config:1;
+ unsigned char has_bl_config:1;
+ unsigned char has_display_config:1;
+ unsigned char has_blob_config:1;
+ unsigned char reserved:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_fwu_handle {
+ bool initialized;
+ bool force_update;
+ char product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE + 1];
+ unsigned int image_size;
+ unsigned int data_pos;
+ unsigned char intr_mask;
+ unsigned char bootloader_id[2];
+ unsigned char productinfo1;
+ unsigned char productinfo2;
+ unsigned char *ext_data_source;
+ unsigned char *read_config_buf;
+ const unsigned char *firmware_data;
+ const unsigned char *config_data;
+ unsigned short block_size;
+ unsigned short fw_block_count;
+ unsigned short config_block_count;
+ unsigned short perm_config_block_count;
+ unsigned short bl_config_block_count;
+ unsigned short disp_config_block_count;
+ unsigned short config_size;
+ unsigned short config_area;
+ unsigned short addr_f34_flash_control;
+ unsigned short addr_f01_interrupt_register;
+ struct synaptics_rmi4_fn_desc f01_fd;
+ struct synaptics_rmi4_fn_desc f34_fd;
+ struct synaptics_rmi4_exp_fn_ptr *fn_ptr;
+ struct synaptics_rmi4_data *rmi4_data;
+ struct f34_flash_control flash_control;
+ struct f34_flash_properties flash_properties;
+ struct workqueue_struct *fwu_workqueue;
+ struct delayed_work fwu_work;
+};
+
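+/*
+ * Sysfs interface: the "data" bin attribute transfers raw image/config bytes
+ * between user space and the driver, while the device attributes below
+ * trigger operations (e.g. writing 1 to "doreflash" starts a reflash) and
+ * report the flash block geometry.
+ */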
+static struct bin_attribute dev_attr_data = {
+ .attr = {
+ .name = "data",
+ .mode = (S_IRUGO | S_IWUGO),
+ },
+ .size = 0,
+ .read = fwu_sysfs_show_image,
+ .write = fwu_sysfs_store_image,
+};
+
+static struct device_attribute attrs[] = {
+ __ATTR(doreflash, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_do_reflash_store),
+ __ATTR(writeconfig, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_write_config_store),
+ __ATTR(readconfig, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_read_config_store),
+ __ATTR(configarea, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_config_area_store),
+ __ATTR(imagesize, S_IWUGO,
+ synaptics_rmi4_show_error,
+ fwu_sysfs_image_size_store),
+ __ATTR(blocksize, S_IRUGO,
+ fwu_sysfs_block_size_show,
+ synaptics_rmi4_store_error),
+ __ATTR(fwblockcount, S_IRUGO,
+ fwu_sysfs_firmware_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(configblockcount, S_IRUGO,
+ fwu_sysfs_configuration_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(permconfigblockcount, S_IRUGO,
+ fwu_sysfs_perm_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(blconfigblockcount, S_IRUGO,
+ fwu_sysfs_bl_config_block_count_show,
+ synaptics_rmi4_store_error),
+ __ATTR(dispconfigblockcount, S_IRUGO,
+ fwu_sysfs_disp_config_block_count_show,
+ synaptics_rmi4_store_error),
+};
+
+static struct synaptics_rmi4_fwu_handle *fwu;
+
+static struct completion remove_complete;
+
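+/* Assemble a 32-bit value from four little-endian bytes of the image */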
+static unsigned int extract_uint(const unsigned char *ptr)
+{
+ return (unsigned int)ptr[0] +
+ (unsigned int)ptr[1] * 0x100 +
+ (unsigned int)ptr[2] * 0x10000 +
+ (unsigned int)ptr[3] * 0x1000000;
+}
+
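+/* Pull the checksum, sizes and product data out of the .img header */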
+static void parse_header(struct image_header *header,
+ const unsigned char *fw_image)
+{
+ header->checksum = extract_uint(&fw_image[CHECKSUM_OFFSET]);
+ header->bootloader_version = fw_image[BOOTLOADER_VERSION_OFFSET];
+ header->image_size = extract_uint(&fw_image[IMAGE_SIZE_OFFSET]);
+ header->config_size = extract_uint(&fw_image[CONFIG_SIZE_OFFSET]);
+ memcpy(header->product_id, &fw_image[PRODUCT_ID_OFFSET],
+ SYNAPTICS_RMI4_PRODUCT_ID_SIZE);
+ header->product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE] = 0;
+ memcpy(header->product_info, &fw_image[PRODUCT_INFO_OFFSET],
+ SYNAPTICS_RMI4_PRODUCT_INFO_SIZE);
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+		"Firmware size %d, config size %d\n",
+ header->image_size,
+ header->config_size);
+#endif
+ return;
+}
+
+static int fwu_read_f01_device_status(struct f01_device_status *status)
+{
+ int retval;
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.data_base_addr,
+ status->data,
+ sizeof(status->data));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to read F01 device status\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+static int fwu_read_f34_queries(void)
+{
+ int retval;
+ unsigned char count = 4;
+ unsigned char buf[10];
+ struct i2c_client *i2c_client = fwu->rmi4_data->i2c_client;
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.query_base_addr + BOOTLOADER_ID_OFFSET,
+ fwu->bootloader_id,
+ sizeof(fwu->bootloader_id));
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to read bootloader ID\n",
+ __func__);
+ return retval;
+ }
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.query_base_addr + FLASH_PROPERTIES_OFFSET,
+ fwu->flash_properties.data,
+ sizeof(fwu->flash_properties.data));
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to read flash properties\n",
+ __func__);
+ return retval;
+ }
+
+ dev_info(&i2c_client->dev, "%s perm:%d, bl:%d, display:%d\n",
+ __func__,
+ fwu->flash_properties.has_perm_config,
+ fwu->flash_properties.has_bl_config,
+ fwu->flash_properties.has_display_config);
+
+ if (fwu->flash_properties.has_perm_config)
+ count += 2;
+
+ if (fwu->flash_properties.has_bl_config)
+ count += 2;
+
+ if (fwu->flash_properties.has_display_config)
+ count += 2;
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.query_base_addr + BLOCK_SIZE_OFFSET,
+ buf,
+ 2);
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to read block size info\n",
+ __func__);
+ return retval;
+ }
+
+ batohs(&fwu->block_size, &(buf[0]));
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.query_base_addr + FW_BLOCK_COUNT_OFFSET,
+ buf,
+ count);
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to read block count info\n",
+ __func__);
+ return retval;
+ }
+
+ batohs(&fwu->fw_block_count, &(buf[0]));
+ batohs(&fwu->config_block_count, &(buf[2]));
+
+ count = 4;
+
+ if (fwu->flash_properties.has_perm_config) {
+ batohs(&fwu->perm_config_block_count, &(buf[count]));
+ count += 2;
+ }
+
+ if (fwu->flash_properties.has_bl_config) {
+ batohs(&fwu->bl_config_block_count, &(buf[count]));
+ count += 2;
+ }
+
+ if (fwu->flash_properties.has_display_config)
+ batohs(&fwu->disp_config_block_count, &(buf[count]));
+
+ fwu->addr_f34_flash_control = fwu->f34_fd.data_base_addr +
+ BLOCK_DATA_OFFSET +
+ fwu->block_size;
+ return 0;
+}
+
+static int fwu_read_interrupt_status(void)
+{
+ int retval;
+ unsigned char interrupt_status;
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->addr_f01_interrupt_register,
+ &interrupt_status,
+ sizeof(interrupt_status));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to read flash status\n",
+ __func__);
+ return retval;
+ }
+ return interrupt_status;
+}
+
+static int fwu_read_f34_flash_status(void)
+{
+ int retval;
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->addr_f34_flash_control,
+ fwu->flash_control.data,
+ sizeof(fwu->flash_control.data));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to read flash status\n",
+ __func__);
+ return retval;
+ }
+ return 0;
+}
+
+static int fwu_reset_device(void)
+{
+ int retval;
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Reset device\n",
+ __func__);
+#endif
+
+ retval = fwu->rmi4_data->reset_device(fwu->rmi4_data);
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to reset core driver after reflash\n",
+ __func__);
+ return retval;
+ }
+ return 0;
+}
+
+static int fwu_write_f34_command(unsigned char cmd)
+{
+ int retval;
+
+ fwu->flash_control.data[0] = cmd;
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->addr_f34_flash_control,
+ fwu->flash_control.data,
+ sizeof(fwu->flash_control.data));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write command 0x%02x\n",
+ __func__, fwu->flash_control.data[0]);
+ return retval;
+ }
+ return 0;
+}
+
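+/*
+ * Poll the F34 flash command field until it clears or the timeout expires.
+ * The field is refreshed by the attention (interrupt) handler while we
+ * sleep, with one final direct read as a fallback before giving up.
+ */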
+static int fwu_wait_for_idle(int timeout_ms)
+{
+ int count = 0;
+ int timeout_count = ((timeout_ms * 1000) / SLEEP_TIME_US) + 1;
+ do {
+ if (fwu->flash_control.command == 0x00)
+ return 0;
+
+ usleep_range(SLEEP_TIME_US, SLEEP_TIME_US + 100);
+ } while (count++ < timeout_count);
+
+ fwu_read_f34_flash_status();
+ if (fwu->flash_control.command == 0x00)
+ return 0;
+
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Timed out waiting for idle status\n",
+ __func__);
+
+ return -ETIMEDOUT;
+}
+
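+/*
+ * Decide whether a reflash is needed by comparing the firmware ID encoded
+ * in the image file name (PRxxxxxxx) and the config ID in the image against
+ * the IDs reported by the device; forced updates and bootloader mode always
+ * reflash the UI firmware.
+ */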
+static enum flash_area fwu_go_nogo(void)
+{
+ int retval = 0;
+ int index = 0;
+ int deviceFirmwareID;
+ int imageConfigID;
+ int deviceConfigID;
+ unsigned long imageFirmwareID;
+ unsigned char firmware_id[4];
+ unsigned char config_id[4];
+ char *strptr;
+	char *imagePR = NULL;
+ enum flash_area flash_area = NONE;
+ struct i2c_client *i2c_client = fwu->rmi4_data->i2c_client;
+ struct f01_device_status f01_device_status;
+
+ if (fwu->force_update) {
+ flash_area = UI_FIRMWARE;
+ goto exit;
+ }
+
+ retval = fwu_read_f01_device_status(&f01_device_status);
+ if (retval < 0) {
+ flash_area = NONE;
+ goto exit;
+ }
+
+	imagePR = kzalloc(MAX_FIRMWARE_ID_LEN, GFP_KERNEL);
+	if (!imagePR)
+		goto exit;
+
+ /* Force update firmware when device is in bootloader mode */
+ if (f01_device_status.flash_prog) {
+ dev_info(&i2c_client->dev,
+ "%s: In flash prog mode\n",
+ __func__);
+ flash_area = UI_FIRMWARE;
+ goto exit;
+ }
+
+
+ /* device firmware id */
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.query_base_addr + 18,
+ firmware_id,
+ sizeof(firmware_id));
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to read firmware ID (code %d).\n", retval);
+ goto exit;
+ }
+ firmware_id[3] = 0;
+ deviceFirmwareID = extract_uint(firmware_id);
+
+ /* .img firmware id */
+ strptr = strstr(FW_IMAGE_NAME, "PR");
+ if (!strptr) {
+ dev_err(&i2c_client->dev,
+			"No valid PR number (PRxxxxxxx) "
+			"found in image file name...\n");
+ goto exit;
+ }
+
+ strptr += 2;
+	while (index < MAX_FIRMWARE_ID_LEN - 1 &&
+			strptr[index] >= '0' && strptr[index] <= '9') {
+ imagePR[index] = strptr[index];
+ index++;
+ }
+ imagePR[index] = 0;
+
+ retval = sstrtoul(imagePR, 10, &imageFirmwareID);
+ if (retval == -EINVAL) {
+ dev_err(&i2c_client->dev,
+ "invalid image firmware id...\n");
+ goto exit;
+ }
+
+ dev_info(&i2c_client->dev,
+ "Device firmware id %d, .img firmware id %d\n",
+ deviceFirmwareID,
+ (unsigned int)imageFirmwareID);
+ if (imageFirmwareID > deviceFirmwareID) {
+ flash_area = UI_FIRMWARE;
+ goto exit;
+ }
+
+ /* device config id */
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.ctrl_base_addr,
+ config_id,
+ sizeof(config_id));
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "Failed to read config ID (code %d).\n", retval);
+ flash_area = NONE;
+ goto exit;
+ }
+ deviceConfigID = extract_uint(config_id);
+
+ dev_info(&i2c_client->dev,
+ "Device config ID 0x%02X, 0x%02X, 0x%02X, 0x%02X\n",
+ config_id[0], config_id[1], config_id[2], config_id[3]);
+
+ /* .img config id */
+ dev_info(&i2c_client->dev,
+ ".img config ID 0x%02X, 0x%02X, 0x%02X, 0x%02X\n",
+ fwu->config_data[0],
+ fwu->config_data[1],
+ fwu->config_data[2],
+ fwu->config_data[3]);
+ imageConfigID = extract_uint(fwu->config_data);
+
+ if (imageConfigID > deviceConfigID) {
+ flash_area = CONFIG_AREA;
+ goto exit;
+ }
+
+exit:
+ kfree(imagePR);
+ if (flash_area == NONE)
+ dev_info(&i2c_client->dev,
+ "Nothing needs to be updated\n");
+ else
+ dev_info(&i2c_client->dev,
+ "Update %s block\n",
+ flash_area == UI_FIRMWARE ? "UI FW" : "CONFIG");
+ return flash_area;
+}
+
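+/*
+ * Walk the Page Description Table from PDT_START downwards to locate the
+ * F01 (device control) and F34 (flash) function descriptors and build the
+ * F34 interrupt mask.
+ */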
+static int fwu_scan_pdt(void)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char intr_count = 0;
+ unsigned char intr_off;
+ unsigned char intr_src;
+ unsigned short addr;
+ bool f01found = false;
+ bool f34found = false;
+ struct synaptics_rmi4_fn_desc rmi_fd;
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&fwu->rmi4_data->i2c_client->dev, "Scan PDT\n");
+#endif
+
+ for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ addr,
+ (unsigned char *)&rmi_fd,
+ sizeof(rmi_fd));
+ if (retval < 0)
+ return retval;
+
+ if (rmi_fd.fn_number) {
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Found F%02x\n",
+ __func__, rmi_fd.fn_number);
+ switch (rmi_fd.fn_number) {
+ case SYNAPTICS_RMI4_F01:
+ f01found = true;
+ fwu->f01_fd = rmi_fd;
+ fwu->addr_f01_interrupt_register =
+ fwu->f01_fd.data_base_addr + 1;
+ break;
+ case SYNAPTICS_RMI4_F34:
+ f34found = true;
+ fwu->f34_fd = rmi_fd;
+ fwu->intr_mask = 0;
+ intr_src = rmi_fd.intr_src_count;
+ intr_off = intr_count % 8;
+ for (ii = intr_off;
+ ii < ((intr_src & MASK_3BIT) +
+ intr_off);
+ ii++)
+ fwu->intr_mask |= 1 << ii;
+ break;
+ }
+ } else
+ break;
+
+ intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
+ }
+
+ if (!f01found || !f34found) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to find both F01 and F34\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ fwu_read_interrupt_status();
+ return 0;
+}
+
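+/*
+ * Write firmware or config blocks one at a time: set the starting block
+ * number, then for each block write the data, issue the flash command and
+ * wait for the controller to report idle.
+ */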
+static int fwu_write_blocks(unsigned char *block_ptr, unsigned short block_cnt,
+ unsigned char command)
+{
+ int retval;
+ unsigned char block_offset[] = {0, 0};
+ unsigned short block_num;
+ struct i2c_client *i2c_client = fwu->rmi4_data->i2c_client;
+#ifdef SHOW_PROGRESS
+ unsigned int progress = (command == CMD_WRITE_CONFIG_BLOCK) ?
+ 10 : 100;
+#endif
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&i2c_client->dev,
+ "%s: Start to update %s blocks\n",
+ __func__,
+ command == CMD_WRITE_CONFIG_BLOCK ?
+ "config" : "firmware");
+#endif
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->f34_fd.data_base_addr + BLOCK_NUMBER_OFFSET,
+ block_offset,
+ sizeof(block_offset));
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to write to block number registers\n",
+ __func__);
+ return retval;
+ }
+
+ for (block_num = 0; block_num < block_cnt; block_num++) {
+#ifdef SHOW_PROGRESS
+ if (block_num % progress == 0)
+ dev_info(&i2c_client->dev,
+ "%s: update %s %3d / %3d\n",
+ __func__,
+ command == CMD_WRITE_CONFIG_BLOCK ?
+ "config" : "firmware",
+ block_num, block_cnt);
+#endif
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->f34_fd.data_base_addr + BLOCK_DATA_OFFSET,
+ block_ptr,
+ fwu->block_size);
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to write block data (block %d)\n",
+ __func__, block_num);
+ return retval;
+ }
+
+ retval = fwu_write_f34_command(command);
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to write command for block %d\n",
+ __func__, block_num);
+ return retval;
+ }
+
+ retval = fwu_wait_for_idle(WRITE_WAIT_MS);
+ if (retval < 0) {
+ dev_err(&i2c_client->dev,
+ "%s: Failed to wait for idle status (block %d)\n",
+ __func__, block_num);
+ return retval;
+ }
+
+ if (fwu->flash_control.status != 0x00) {
+ dev_err(&i2c_client->dev,
+ "%s: Flash block %d failed, status 0x%02X\n",
+				__func__, block_num, fwu->flash_control.status);
+ return -1;
+ }
+
+ block_ptr += fwu->block_size;
+ }
+#ifdef SHOW_PROGRESS
+ dev_info(&i2c_client->dev,
+ "%s: update %s %3d / %3d\n",
+ __func__,
+ command == CMD_WRITE_CONFIG_BLOCK ?
+ "config" : "firmware",
+ block_cnt, block_cnt);
+#endif
+ return 0;
+}
+
+static int fwu_write_firmware(void)
+{
+ return fwu_write_blocks((unsigned char *)fwu->firmware_data,
+ fwu->fw_block_count, CMD_WRITE_FW_BLOCK);
+}
+
+static int fwu_write_configuration(void)
+{
+ return fwu_write_blocks((unsigned char *)fwu->config_data,
+ fwu->config_block_count, CMD_WRITE_CONFIG_BLOCK);
+}
+
+static int fwu_write_bootloader_id(void)
+{
+ int retval;
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "Write bootloader ID 0x%02X 0x%02X\n",
+ fwu->bootloader_id[0],
+ fwu->bootloader_id[1]);
+#endif
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->f34_fd.data_base_addr + BLOCK_DATA_OFFSET,
+ fwu->bootloader_id,
+ sizeof(fwu->bootloader_id));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write bootloader ID\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
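+/*
+ * Put the controller into bootloader (flash programming) mode: unlock flash
+ * with the bootloader ID, issue the enable command, rescan the PDT (register
+ * addresses change in bootloader mode) and disable sleep while flashing.
+ */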
+static int fwu_enter_flash_prog(void)
+{
+ int retval;
+ struct f01_device_status f01_device_status;
+ struct f01_device_control f01_device_control;
+
+#ifdef DEBUG_FW_UPDATE
+ dev_info(&fwu->rmi4_data->i2c_client->dev, "Enter bootloader mode\n");
+#endif
+ retval = fwu_read_f01_device_status(&f01_device_status);
+ if (retval < 0)
+ return retval;
+
+ if (f01_device_status.flash_prog) {
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Already in flash prog mode\n",
+ __func__);
+ return 0;
+ }
+
+ retval = fwu_write_bootloader_id();
+ if (retval < 0)
+ return retval;
+
+ retval = fwu_write_f34_command(CMD_ENABLE_FLASH_PROG);
+ if (retval < 0)
+ return retval;
+
+ retval = fwu_wait_for_idle(ENABLE_WAIT_MS);
+ if (retval < 0)
+ return retval;
+
+ retval = fwu_scan_pdt();
+ if (retval < 0)
+ return retval;
+
+ retval = fwu_read_f01_device_status(&f01_device_status);
+ if (retval < 0)
+ return retval;
+
+ if (!f01_device_status.flash_prog) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Not in flash prog mode\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ retval = fwu_read_f34_queries();
+ if (retval < 0)
+ return retval;
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f01_fd.ctrl_base_addr,
+ f01_device_control.data,
+ sizeof(f01_device_control.data));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to read F01 device control\n",
+ __func__);
+ return retval;
+ }
+
+ f01_device_control.nosleep = true;
+ f01_device_control.sleep_mode = SLEEP_MODE_NORMAL;
+
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->f01_fd.ctrl_base_addr,
+ f01_device_control.data,
+ sizeof(f01_device_control.data));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write F01 device control\n",
+ __func__);
+ return retval;
+ }
+
+ return retval;
+}
+
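+/*
+ * Full reflash: enter bootloader mode, erase everything, then program the
+ * firmware and configuration blocks from the parsed image.
+ */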
+static int fwu_do_reflash(void)
+{
+ int retval;
+
+ retval = fwu_enter_flash_prog();
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Entered flash prog mode\n",
+ __func__);
+
+ retval = fwu_write_bootloader_id();
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Bootloader ID written\n",
+ __func__);
+
+ retval = fwu_write_f34_command(CMD_ERASE_ALL);
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Erase all command written\n",
+ __func__);
+
+ retval = fwu_wait_for_idle(ERASE_WAIT_MS);
+ if (retval < 0)
+ return retval;
+
+ if (fwu->flash_control.status != 0x00) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Erase all command failed, status 0x%02X\n",
+			__func__, fwu->flash_control.status);
+ return -1;
+ }
+
+ if (fwu->firmware_data) {
+ retval = fwu_write_firmware();
+ if (retval < 0)
+ return retval;
+ pr_notice("%s: Firmware programmed\n", __func__);
+ }
+
+ if (fwu->config_data) {
+ retval = fwu_write_configuration();
+ if (retval < 0)
+ return retval;
+ pr_notice("%s: Configuration programmed\n", __func__);
+ }
+
+ return retval;
+}
+
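+/*
+ * Erase and rewrite the selected config area; the permanent config area is
+ * written directly, without the bootloader ID unlock and erase steps.
+ */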
+static int fwu_do_write_config(void)
+{
+ int retval;
+
+ retval = fwu_enter_flash_prog();
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Entered flash prog mode\n",
+ __func__);
+
+ if (fwu->config_area == PERM_CONFIG_AREA) {
+ fwu->config_block_count = fwu->perm_config_block_count;
+ goto write_config;
+ }
+
+ retval = fwu_write_bootloader_id();
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Bootloader ID written\n",
+ __func__);
+
+ switch (fwu->config_area) {
+ case UI_CONFIG_AREA:
+ retval = fwu_write_f34_command(CMD_ERASE_CONFIG);
+ break;
+ case BL_CONFIG_AREA:
+ retval = fwu_write_f34_command(CMD_ERASE_BL_CONFIG);
+ fwu->config_block_count = fwu->bl_config_block_count;
+ break;
+ case DISP_CONFIG_AREA:
+ retval = fwu_write_f34_command(CMD_ERASE_DISP_CONFIG);
+ fwu->config_block_count = fwu->disp_config_block_count;
+ break;
+ }
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Erase command written\n",
+ __func__);
+
+ retval = fwu_wait_for_idle(ERASE_WAIT_MS);
+ if (retval < 0)
+ return retval;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Idle status detected\n",
+ __func__);
+
+write_config:
+ retval = fwu_write_configuration();
+ if (retval < 0)
+ return retval;
+
+ pr_notice("%s: Config written\n", __func__);
+
+ return retval;
+}
+
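+/*
+ * Validate the requested config area, locate the config data in the
+ * user-supplied image, write it and reset the device.
+ */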
+static int fwu_start_write_config(void)
+{
+ int retval;
+ struct image_header header;
+
+ switch (fwu->config_area) {
+ case UI_CONFIG_AREA:
+ break;
+ case PERM_CONFIG_AREA:
+ if (!fwu->flash_properties.has_perm_config)
+ return -EINVAL;
+ break;
+ case BL_CONFIG_AREA:
+ if (!fwu->flash_properties.has_bl_config)
+ return -EINVAL;
+ break;
+ case DISP_CONFIG_AREA:
+ if (!fwu->flash_properties.has_display_config)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fwu->ext_data_source)
+ fwu->config_data = fwu->ext_data_source;
+ else
+ return -EINVAL;
+
+ if (fwu->config_area == UI_CONFIG_AREA) {
+ parse_header(&header, fwu->ext_data_source);
+
+ if (header.config_size) {
+ fwu->config_data = fwu->ext_data_source +
+ FW_IMAGE_OFFSET +
+ header.image_size;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ pr_notice("%s: Start of write config process\n", __func__);
+
+ retval = fwu_do_write_config();
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write config\n",
+ __func__);
+ }
+
+ fwu->rmi4_data->reset_device(fwu->rmi4_data);
+
+ pr_notice("%s: End of write config process\n", __func__);
+
+ return retval;
+}
+
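+/*
+ * Read the selected config area back from flash, one block at a time, into
+ * read_config_buf, which is exposed through the "data" sysfs attribute.
+ */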
+static int fwu_do_read_config(void)
+{
+ int retval;
+ unsigned char block_offset[] = {0, 0};
+ unsigned short block_num;
+ unsigned short block_count;
+ unsigned short index = 0;
+
+ retval = fwu_enter_flash_prog();
+ if (retval < 0)
+ goto exit;
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Entered flash prog mode\n",
+ __func__);
+
+ switch (fwu->config_area) {
+ case UI_CONFIG_AREA:
+ block_count = fwu->config_block_count;
+ break;
+ case PERM_CONFIG_AREA:
+ if (!fwu->flash_properties.has_perm_config) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ block_count = fwu->perm_config_block_count;
+ break;
+ case BL_CONFIG_AREA:
+ if (!fwu->flash_properties.has_bl_config) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ block_count = fwu->bl_config_block_count;
+ break;
+ case DISP_CONFIG_AREA:
+ if (!fwu->flash_properties.has_display_config) {
+ retval = -EINVAL;
+ goto exit;
+ }
+ block_count = fwu->disp_config_block_count;
+ break;
+ default:
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ fwu->config_size = fwu->block_size * block_count;
+
+ kfree(fwu->read_config_buf);
+	fwu->read_config_buf = kzalloc(fwu->config_size, GFP_KERNEL);
+	if (!fwu->read_config_buf) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
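+	/* Select the config area in the upper bits of the block number */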
+ block_offset[1] |= (fwu->config_area << 5);
+
+ retval = fwu->fn_ptr->write(fwu->rmi4_data,
+ fwu->f34_fd.data_base_addr + BLOCK_NUMBER_OFFSET,
+ block_offset,
+ sizeof(block_offset));
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write to block number registers\n",
+ __func__);
+ goto exit;
+ }
+
+ for (block_num = 0; block_num < block_count; block_num++) {
+ retval = fwu_write_f34_command(CMD_READ_CONFIG_BLOCK);
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to write read config command\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = fwu_wait_for_idle(WRITE_WAIT_MS);
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to wait for idle status\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = fwu->fn_ptr->read(fwu->rmi4_data,
+ fwu->f34_fd.data_base_addr + BLOCK_DATA_OFFSET,
+ &fwu->read_config_buf[index],
+ fwu->block_size);
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to read block data (block %d)\n",
+ __func__, block_num);
+ goto exit;
+ }
+
+ index += fwu->block_size;
+ }
+
+exit:
+ fwu->rmi4_data->reset_device(fwu->rmi4_data);
+
+ return retval;
+}
+
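+/*
+ * Top-level reflash sequence: obtain the image (caller-supplied buffer or
+ * request_firmware()), parse its header, decide what to flash, program the
+ * firmware and/or config, then reset the device and check its status.
+ */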
+static int fwu_start_reflash(void)
+{
+ int retval;
+ struct image_header header;
+ const unsigned char *fw_image;
+ const struct firmware *fw_entry = NULL;
+ struct f01_device_status f01_device_status;
+ enum flash_area flash_area;
+
+ pr_notice("%s: Start of reflash process\n", __func__);
+
+ if (fwu->ext_data_source)
+ fw_image = fwu->ext_data_source;
+ else {
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Requesting firmware image %s\n",
+ __func__, FW_IMAGE_NAME);
+
+ retval = request_firmware(&fw_entry, FW_IMAGE_NAME,
+ &fwu->rmi4_data->i2c_client->dev);
+ if (retval != 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Firmware image %s not available\n",
+ __func__, FW_IMAGE_NAME);
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ dev_dbg(&fwu->rmi4_data->i2c_client->dev,
+			"%s: Firmware image size = %zu\n",
+ __func__, fw_entry->size);
+
+ fw_image = fw_entry->data;
+ }
+
+ parse_header(&header, fw_image);
+
+ if (header.image_size)
+ fwu->firmware_data = fw_image + FW_IMAGE_OFFSET;
+ if (header.config_size) {
+ fwu->config_data = fw_image + FW_IMAGE_OFFSET +
+ header.image_size;
+ }
+
+ if (fwu->ext_data_source)
+ flash_area = UI_FIRMWARE;
+ else
+ flash_area = fwu_go_nogo();
+
+ switch (flash_area) {
+ case NONE:
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "%s: No need to do reflash.\n",
+ __func__);
+ goto exit;
+ case UI_FIRMWARE:
+ retval = fwu_do_reflash();
+ break;
+ case CONFIG_AREA:
+ retval = fwu_do_write_config();
+ break;
+ default:
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Unknown flash area\n",
+ __func__);
+ goto exit;
+ }
+
+ if (retval < 0) {
+ dev_err(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Failed to do reflash\n",
+ __func__);
+ }
+
+ /* reset device */
+ fwu_reset_device();
+
+ /* check device status */
+ retval = fwu_read_f01_device_status(&f01_device_status);
+ if (retval < 0)
+ goto exit;
+
+ dev_info(&fwu->rmi4_data->i2c_client->dev, "Device is in %s mode\n",
+ f01_device_status.flash_prog == 1 ? "bootloader" : "UI");
+ if (f01_device_status.flash_prog)
+ dev_info(&fwu->rmi4_data->i2c_client->dev, "Flash status %d\n",
+ f01_device_status.status_code);
+
+ if (f01_device_status.flash_prog) {
+ dev_info(&fwu->rmi4_data->i2c_client->dev,
+ "%s: Device is in flash prog mode 0x%02X\n",
+ __func__, f01_device_status.status_code);
+ retval = 0;
+ goto exit;
+ }
+
+	pr_notice("%s: End of reflash process\n", __func__);
+
+exit:
+	if (fw_entry)
+		release_firmware(fw_entry);
+
+	return retval;
+}
+
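+/*
+ * Exported entry point: start a reflash using the caller-supplied image, or
+ * the built-in FW_IMAGE_NAME when fw_data is NULL.
+ */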
+int synaptics_fw_updater(unsigned char *fw_data)
+{
+ int retval;
+
+ if (!fwu)
+ return -ENODEV;
+
+ if (!fwu->initialized)
+ return -ENODEV;
+
+ fwu->ext_data_source = fw_data;
+ fwu->config_area = UI_CONFIG_AREA;
+
+ retval = fwu_start_reflash();
+
+ return retval;
+}
+EXPORT_SYMBOL(synaptics_fw_updater);
+
+static ssize_t fwu_sysfs_show_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count)
+{
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (count < fwu->config_size) {
+ dev_err(&rmi4_data->i2c_client->dev,
+			"%s: Not enough space (%zu bytes) in buffer\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ memcpy(buf, fwu->read_config_buf, fwu->config_size);
+
+ return fwu->config_size;
+}
+
+static ssize_t fwu_sysfs_store_image(struct file *data_file,
+ struct kobject *kobj, struct bin_attribute *attributes,
+ char *buf, loff_t pos, size_t count)
+{
+	if (!fwu->ext_data_source ||
+			fwu->data_pos + count > fwu->image_size)
+		return -EINVAL;
+
+	memcpy((void *)(&fwu->ext_data_source[fwu->data_pos]),
+			(const void *)buf,
+			count);
+
+ fwu->data_pos += count;
+
+ return count;
+}
+
+static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (input != 1) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = synaptics_fw_updater(fwu->ext_data_source);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to do reflash\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = count;
+
+exit:
+ kfree(fwu->ext_data_source);
+ fwu->ext_data_source = NULL;
+ return retval;
+}
+
+static ssize_t fwu_sysfs_write_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ if (input != 1) {
+ retval = -EINVAL;
+ goto exit;
+ }
+
+ retval = fwu_start_write_config();
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to write config\n",
+ __func__);
+ goto exit;
+ }
+
+ retval = count;
+
+exit:
+ kfree(fwu->ext_data_source);
+ fwu->ext_data_source = NULL;
+ return retval;
+}
+
+static ssize_t fwu_sysfs_read_config_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input != 1)
+ return -EINVAL;
+
+ retval = fwu_do_read_config();
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to read config\n",
+ __func__);
+ return retval;
+ }
+
+ return count;
+}
+
+static ssize_t fwu_sysfs_config_area_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long config_area;
+
+ retval = sstrtoul(buf, 10, &config_area);
+ if (retval)
+ return retval;
+
+ fwu->config_area = config_area;
+
+ return count;
+}
+
+static ssize_t fwu_sysfs_image_size_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned long size;
+ struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
+
+ retval = sstrtoul(buf, 10, &size);
+ if (retval)
+ return retval;
+
+ fwu->image_size = size;
+ fwu->data_pos = 0;
+
+ kfree(fwu->ext_data_source);
+ fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
+ if (!fwu->ext_data_source) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for image data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return count;
+}
+
+static ssize_t fwu_sysfs_block_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->block_size);
+}
+
+static ssize_t fwu_sysfs_firmware_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->fw_block_count);
+}
+
+static ssize_t fwu_sysfs_configuration_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->config_block_count);
+}
+
+static ssize_t fwu_sysfs_perm_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->perm_config_block_count);
+}
+
+static ssize_t fwu_sysfs_bl_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->bl_config_block_count);
+}
+
+static ssize_t fwu_sysfs_disp_config_block_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", fwu->disp_config_block_count);
+}
+
+static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask)
+{
+ if (fwu->intr_mask & intr_mask)
+ fwu_read_f34_flash_status();
+
+ return;
+}
+
+static void synaptics_rmi4_fwu_work(struct work_struct *work)
+{
+ fwu_start_reflash();
+}
+
+static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char attr_count;
+ struct pdt_properties pdt_props;
+
+ fwu = kzalloc(sizeof(*fwu), GFP_KERNEL);
+ if (!fwu) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for fwu\n",
+ __func__);
+ goto exit;
+ }
+
+ fwu->fn_ptr = kzalloc(sizeof(*(fwu->fn_ptr)), GFP_KERNEL);
+ if (!fwu->fn_ptr) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for fn_ptr\n",
+ __func__);
+ retval = -ENOMEM;
+ goto exit_free_fwu;
+ }
+
+ fwu->rmi4_data = rmi4_data;
+ fwu->fn_ptr->read = rmi4_data->i2c_read;
+ fwu->fn_ptr->write = rmi4_data->i2c_write;
+ fwu->fn_ptr->enable = rmi4_data->irq_enable;
+
+ retval = fwu->fn_ptr->read(rmi4_data,
+ PDT_PROPS,
+ pdt_props.data,
+ sizeof(pdt_props.data));
+ if (retval < 0) {
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Failed to read PDT properties, assuming 0x00\n",
+ __func__);
+ } else if (pdt_props.has_bsr) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Reflash for LTS not currently supported\n",
+ __func__);
+ goto exit_free_mem;
+ }
+
+ retval = fwu_scan_pdt();
+ if (retval < 0)
+ goto exit_free_mem;
+
+ fwu->productinfo1 = rmi4_data->rmi4_mod_info.product_info[0];
+ fwu->productinfo2 = rmi4_data->rmi4_mod_info.product_info[1];
+
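+	/* Copy the product ID string and make sure it is NUL-terminated */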
+ memcpy(fwu->product_id, rmi4_data->rmi4_mod_info.product_id_string,
+ SYNAPTICS_RMI4_PRODUCT_ID_SIZE);
+ fwu->product_id[SYNAPTICS_RMI4_PRODUCT_ID_SIZE] = 0;
+
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: F01 product info: 0x%04x 0x%04x\n",
+ __func__, fwu->productinfo1, fwu->productinfo2);
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: F01 product ID: %s\n",
+ __func__, fwu->product_id);
+
+ retval = fwu_read_f34_queries();
+ if (retval < 0)
+ goto exit_free_mem;
+
+ fwu->initialized = true;
+ fwu->force_update = FORCE_UPDATE;
+
+ retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
+ &dev_attr_data);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create sysfs bin file\n",
+ __func__);
+ goto exit_free_mem;
+ }
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ retval = -ENODEV;
+ goto exit_remove_attrs;
+ }
+ }
+
+#ifdef INSIDE_FIRMWARE_UPDATE
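+	/* Defer the reflash attempt until roughly one second after init */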
+ fwu->fwu_workqueue = create_singlethread_workqueue("fwu_workqueue");
+ INIT_DELAYED_WORK(&fwu->fwu_work, synaptics_rmi4_fwu_work);
+ queue_delayed_work(fwu->fwu_workqueue,
+ &fwu->fwu_work,
+ msecs_to_jiffies(1000));
+#endif
+ return 0;
+
+exit_remove_attrs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+	sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+exit_free_mem:
+ kfree(fwu->fn_ptr);
+
+exit_free_fwu:
+ kfree(fwu);
+
+exit:
+ return 0;
+}
+
+static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
+{
+ unsigned char attr_count;
+
+ sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ }
+
+ kfree(fwu->fn_ptr);
+ kfree(fwu);
+
+ complete(&remove_complete);
+
+ return;
+}
+
+static int __init rmi4_fw_update_module_init(void)
+{
+ synaptics_rmi4_new_function(RMI_FW_UPDATER, true,
+ synaptics_rmi4_fwu_init,
+ synaptics_rmi4_fwu_remove,
+ synaptics_rmi4_fwu_attn);
+ return 0;
+}
+
+static void __exit rmi4_fw_update_module_exit(void)
+{
+ init_completion(&remove_complete);
+ synaptics_rmi4_new_function(RMI_FW_UPDATER, false,
+ synaptics_rmi4_fwu_init,
+ synaptics_rmi4_fwu_remove,
+ synaptics_rmi4_fwu_attn);
+ wait_for_completion(&remove_complete);
+ return;
+}
+
+module_init(rmi4_fw_update_module_init);
+module_exit(rmi4_fw_update_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("RMI4 FW Update Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION);
diff --git a/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.c b/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.c
new file mode 100644
index 000000000000..76f9155bd49c
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.c
@@ -0,0 +1,2162 @@
+/*
+ * Synaptics RMI4 touchscreen driver
+ *
+ * Copyright (C) 2012 Synaptics Incorporated
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_i2c_rmi4.h"
+#ifdef KERNEL_ABOVE_2_6_38
+#include <linux/input/mt.h>
+#endif
+
+#define DRIVER_NAME "synaptics_rmi4_i2c"
+#define INPUT_PHYS_NAME "synaptics_rmi4_i2c/input0"
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define TYPE_B_PROTOCOL
+#endif
+
+#define NO_0D_WHILE_2D
+/*
+#define REPORT_2D_Z
+*/
+#define REPORT_2D_W
+
+#define RPT_TYPE (1 << 0)
+#define RPT_X_LSB (1 << 1)
+#define RPT_X_MSB (1 << 2)
+#define RPT_Y_LSB (1 << 3)
+#define RPT_Y_MSB (1 << 4)
+#define RPT_Z (1 << 5)
+#define RPT_WX (1 << 6)
+#define RPT_WY (1 << 7)
+#define RPT_DEFAULT (RPT_TYPE | RPT_X_LSB | RPT_X_MSB | RPT_Y_LSB | RPT_Y_MSB)
+
+#define EXP_FN_DET_INTERVAL 1000 /* ms */
+#define POLLING_PERIOD 1 /* ms */
+#define SYN_I2C_RETRY_TIMES 10
+#define MAX_ABS_MT_TOUCH_MAJOR 15
+
+#define F01_STD_QUERY_LEN 21
+#define F01_BUID_ID_OFFSET 18
+#define F11_STD_QUERY_LEN 9
+#define F11_STD_CTRL_LEN 10
+#define F11_STD_DATA_LEN 12
+
+#define NORMAL_OPERATION (0 << 0)
+#define SENSOR_SLEEP (1 << 0)
+#define NO_SLEEP_OFF (0 << 3)
+#define NO_SLEEP_ON (1 << 3)
+
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr, unsigned char *data,
+ unsigned short length);
+
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr, unsigned char *data,
+ unsigned short length);
+
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static void synaptics_rmi4_early_suspend(struct early_suspend *h);
+
+static void synaptics_rmi4_late_resume(struct early_suspend *h);
+
+static int synaptics_rmi4_suspend(struct device *dev);
+
+static int synaptics_rmi4_resume(struct device *dev);
+#endif
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+struct synaptics_rmi4_f01_device_status {
+ union {
+ struct {
+ unsigned char status_code:4;
+ unsigned char reserved:2;
+ unsigned char flash_prog:1;
+ unsigned char unconfigured:1;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f1a_query {
+ union {
+ struct {
+ unsigned char max_button_count:3;
+ unsigned char reserved:5;
+ unsigned char has_general_control:1;
+ unsigned char has_interrupt_enable:1;
+ unsigned char has_multibutton_select:1;
+ unsigned char has_tx_rx_map:1;
+ unsigned char has_perbutton_threshold:1;
+ unsigned char has_release_threshold:1;
+ unsigned char has_strongestbtn_hysteresis:1;
+ unsigned char has_filter_strength:1;
+ } __packed;
+ unsigned char data[2];
+ };
+};
+
+struct synaptics_rmi4_f1a_control_0 {
+ union {
+ struct {
+ unsigned char multibutton_report:2;
+ unsigned char filter_mode:2;
+ unsigned char reserved:4;
+ } __packed;
+ unsigned char data[1];
+ };
+};
+
+struct synaptics_rmi4_f1a_control_3_4 {
+ unsigned char transmitterbutton;
+ unsigned char receiverbutton;
+};
+
+struct synaptics_rmi4_f1a_control {
+ struct synaptics_rmi4_f1a_control_0 general_control;
+ unsigned char *button_int_enable;
+ unsigned char *multi_button;
+ struct synaptics_rmi4_f1a_control_3_4 *electrode_map;
+ unsigned char *button_threshold;
+ unsigned char button_release_threshold;
+ unsigned char strongest_button_hysteresis;
+ unsigned char filter_strength;
+};
+
+struct synaptics_rmi4_f1a_handle {
+ int button_bitmask_size;
+ unsigned char button_count;
+ unsigned char valid_button_count;
+ unsigned char *button_data_buffer;
+ unsigned char *button_map;
+ struct synaptics_rmi4_f1a_query button_query;
+ struct synaptics_rmi4_f1a_control button_control;
+};
+
+struct synaptics_rmi4_exp_fn {
+ enum exp_fn fn_type;
+ bool inserted;
+ int (*func_init)(struct synaptics_rmi4_data *rmi4_data);
+ void (*func_remove)(struct synaptics_rmi4_data *rmi4_data);
+ void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask);
+ struct list_head link;
+};
+
+static struct device_attribute attrs[] = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ __ATTR(full_pm_cycle, (S_IRUGO | S_IWUGO),
+ synaptics_rmi4_full_pm_cycle_show,
+ synaptics_rmi4_full_pm_cycle_store),
+#endif
+ __ATTR(reset, S_IWUGO,
+ synaptics_rmi4_show_error,
+ synaptics_rmi4_f01_reset_store),
+ __ATTR(productinfo, S_IRUGO,
+ synaptics_rmi4_f01_productinfo_show,
+ synaptics_rmi4_store_error),
+ __ATTR(buildid, S_IRUGO,
+ synaptics_rmi4_f01_buildid_show,
+ synaptics_rmi4_store_error),
+ __ATTR(flashprog, S_IRUGO,
+ synaptics_rmi4_f01_flashprog_show,
+ synaptics_rmi4_store_error),
+ __ATTR(0dbutton, (S_IRUGO | S_IWUGO),
+ synaptics_rmi4_0dbutton_show,
+ synaptics_rmi4_0dbutton_store),
+};
+
+static bool exp_fn_inited;
+static struct mutex exp_fn_list_mutex;
+static struct list_head exp_fn_list;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static ssize_t synaptics_rmi4_full_pm_cycle_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->full_pm_cycle);
+}
+
+static ssize_t synaptics_rmi4_full_pm_cycle_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ rmi4_data->full_pm_cycle = input > 0 ? 1 : 0;
+
+ return count;
+}
+#endif
+
+static ssize_t synaptics_rmi4_f01_reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int reset;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ if (sscanf(buf, "%u", &reset) != 1)
+ return -EINVAL;
+
+ if (reset != 1)
+ return -EINVAL;
+
+ retval = synaptics_rmi4_reset_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(dev,
+ "%s: Failed to issue reset command, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return count;
+}
+
+static ssize_t synaptics_rmi4_f01_productinfo_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%02x 0x%02x\n",
+ (rmi4_data->rmi4_mod_info.product_info[0]),
+ (rmi4_data->rmi4_mod_info.product_info[1]));
+}
+
+static ssize_t synaptics_rmi4_f01_buildid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int build_id;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
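+	/* The firmware build ID is a 3-byte little-endian value */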
+ build_id = (unsigned int)rmi->build_id[0] +
+ (unsigned int)rmi->build_id[1] * 0x100 +
+ (unsigned int)rmi->build_id[2] * 0x10000;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ build_id);
+}
+
+static ssize_t synaptics_rmi4_f01_flashprog_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ struct synaptics_rmi4_f01_device_status device_status;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ device_status.data,
+ sizeof(device_status.data));
+ if (retval < 0) {
+ dev_err(dev,
+ "%s: Failed to read device status, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ device_status.flash_prog);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ rmi4_data->button_0d_enabled);
+}
+
+static ssize_t synaptics_rmi4_0dbutton_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int input;
+ unsigned char ii;
+ unsigned char intr_enable;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ input = input > 0 ? 1 : 0;
+
+ if (rmi4_data->button_0d_enabled == input)
+ return count;
+
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A) {
+ ii = fhandler->intr_reg_num;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr +
+ 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+
+ if (input == 1)
+ intr_enable |= fhandler->intr_mask;
+ else
+ intr_enable &= ~fhandler->intr_mask;
+
+ retval = synaptics_rmi4_i2c_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr +
+ 1 + ii,
+ &intr_enable,
+ sizeof(intr_enable));
+ if (retval < 0)
+ return retval;
+ }
+ }
+ }
+
+ rmi4_data->button_0d_enabled = input;
+
+ return count;
+}
+
+ /**
+ * synaptics_rmi4_set_page()
+ *
+ * Called by synaptics_rmi4_i2c_read() and synaptics_rmi4_i2c_write().
+ *
+ * This function writes to the page select register to switch to the
+ * assigned page.
+ */
+static int synaptics_rmi4_set_page(struct synaptics_rmi4_data *rmi4_data,
+ unsigned int address)
+{
+ int retval = 0;
+ unsigned char retry;
+ unsigned char buf[PAGE_SELECT_LEN];
+ unsigned char page;
+ struct i2c_client *i2c = rmi4_data->i2c_client;
+
+ page = ((address >> 8) & MASK_8BIT);
+ if (page != rmi4_data->current_page) {
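+		/* Write the new page number to the page select register (address 0xFF) */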
+ buf[0] = MASK_8BIT;
+ buf[1] = page;
+ for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+ retval = i2c_master_send(i2c, buf, PAGE_SELECT_LEN);
+ if (retval != PAGE_SELECT_LEN) {
+ dev_err(&i2c->dev,
+ "%s: I2C retry %d\n",
+ __func__, retry + 1);
+ msleep(20);
+ } else {
+ rmi4_data->current_page = page;
+ break;
+ }
+ }
+ } else
+ return PAGE_SELECT_LEN;
+ return (retval == PAGE_SELECT_LEN) ? retval : -EIO;
+}
+
+ /**
+ * synaptics_rmi4_i2c_read()
+ *
+ * Called by various functions in this driver, and also exported to
+ * other expansion Function modules such as rmi_dev.
+ *
+ * This function reads data of an arbitrary length from the sensor,
+ * starting from an assigned register address of the sensor, via I2C
+ * with a retry mechanism.
+ */
+static int synaptics_rmi4_i2c_read(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr, unsigned char *data, unsigned short length)
+{
+ int retval;
+ unsigned char retry;
+ unsigned char buf;
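+	/* msg[0] writes the 8-bit register address, msg[1] reads back the data */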
+ struct i2c_msg msg[] = {
+ {
+ .addr = rmi4_data->i2c_client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &buf,
+ },
+ {
+ .addr = rmi4_data->i2c_client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = data,
+ },
+ };
+
+ buf = addr & MASK_8BIT;
+
+ mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ retval = synaptics_rmi4_set_page(rmi4_data, addr);
+ if (retval != PAGE_SELECT_LEN)
+ goto exit;
+
+ for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+ if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 2) == 2) {
+ retval = length;
+ break;
+ }
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: I2C retry %d\n",
+ __func__, retry + 1);
+ msleep(20);
+ }
+
+ if (retry == SYN_I2C_RETRY_TIMES) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: I2C read over retry limit\n",
+ __func__);
+ retval = -EIO;
+ }
+
+exit:
+ mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ return retval;
+}
+
+ /**
+ * synaptics_rmi4_i2c_write()
+ *
+ * Called by various functions in this driver, and also exported to
+ * other expansion Function modules such as rmi_dev.
+ *
+ * This function writes data of an arbitrary length to the sensor,
+ * starting from an assigned register address of the sensor, via I2C with
+ * a retry mechanism.
+ */
+static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
+ unsigned short addr, unsigned char *data, unsigned short length)
+{
+ int retval;
+ unsigned char retry;
+ unsigned char buf[length + 1];
+ struct i2c_msg msg[] = {
+ {
+ .addr = rmi4_data->i2c_client->addr,
+ .flags = 0,
+ .len = length + 1,
+ .buf = buf,
+ }
+ };
+
+ mutex_lock(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ retval = synaptics_rmi4_set_page(rmi4_data, addr);
+ if (retval != PAGE_SELECT_LEN)
+ goto exit;
+
+ buf[0] = addr & MASK_8BIT;
+ memcpy(&buf[1], &data[0], length);
+
+ for (retry = 0; retry < SYN_I2C_RETRY_TIMES; retry++) {
+ if (i2c_transfer(rmi4_data->i2c_client->adapter, msg, 1) == 1) {
+ retval = length;
+ break;
+ }
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: I2C retry %d\n",
+ __func__, retry + 1);
+ msleep(20);
+ }
+
+ if (retry == SYN_I2C_RETRY_TIMES) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: I2C write over retry limit\n",
+ __func__);
+ retval = -EIO;
+ }
+
+exit:
+ mutex_unlock(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ return retval;
+}
+
+ /**
+ * synaptics_rmi4_f11_abs_report()
+ *
+ * Called by synaptics_rmi4_report_touch() when valid Function $11
+ * finger data has been detected.
+ *
+ * This function reads the Function $11 data registers, determines the
+ * status of each finger supported by the Function, processes any
+ * necessary coordinate manipulation, reports the finger data to
+ * the input subsystem, and returns the number of fingers detected.
+ */
+static int synaptics_rmi4_f11_abs_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char touch_count = 0; /* number of touch points */
+ unsigned char reg_index;
+ unsigned char finger;
+ unsigned char fingers_supported;
+ unsigned char num_of_finger_status_regs;
+ unsigned char finger_shift;
+ unsigned char finger_status;
+ unsigned char data_reg_blk_size;
+ unsigned char finger_status_reg[3];
+ unsigned char data[F11_STD_DATA_LEN];
+ unsigned short data_addr;
+ unsigned short data_offset;
+ int x;
+ int y;
+ int wx;
+ int wy;
+
+ /*
+ * The number of finger status registers is determined by the
+ * maximum number of fingers supported - 2 bits per finger. So
+ * the number of finger status registers to read is:
+ * register_count = ceil(max_num_of_fingers / 4)
+ */
+ fingers_supported = fhandler->num_of_data_points;
+ num_of_finger_status_regs = (fingers_supported + 3) / 4;
+ data_addr = fhandler->full_addr.data_base;
+ data_reg_blk_size = fhandler->size_of_data_register_block;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ data_addr,
+ finger_status_reg,
+ num_of_finger_status_regs);
+ if (retval < 0)
+ return 0;
+
+ for (finger = 0; finger < fingers_supported; finger++) {
+ reg_index = finger / 4;
+ finger_shift = (finger % 4) * 2;
+ finger_status = (finger_status_reg[reg_index] >> finger_shift)
+ & MASK_2BIT;
+
+ /*
+ * Each 2-bit finger status field represents the following:
+ * 00 = finger not present
+ * 01 = finger present and data accurate
+ * 10 = finger present but data may be inaccurate
+ * 11 = reserved
+ */
+#ifdef TYPE_B_PROTOCOL
+ input_mt_slot(rmi4_data->input_dev, finger);
+ input_mt_report_slot_state(rmi4_data->input_dev,
+ MT_TOOL_FINGER, finger_status != 0);
+#endif
+
+ if (finger_status) {
+ data_offset = data_addr +
+ num_of_finger_status_regs +
+ (finger * data_reg_blk_size);
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ data_offset,
+ data,
+ data_reg_blk_size);
+ if (retval < 0)
+ return 0;
+
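+			/*
+			 * X and Y are 12-bit values: the 8 MSBs come from
+			 * data[0] and data[1], and the 4 LSBs are packed into
+			 * the low and high nibbles of data[2].
+			 */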
+ x = (data[0] << 4) | (data[2] & MASK_4BIT);
+ y = (data[1] << 4) | ((data[2] >> 4) & MASK_4BIT);
+ wx = (data[3] & MASK_4BIT);
+ wy = (data[3] >> 4) & MASK_4BIT;
+
+ if (rmi4_data->board->x_flip)
+ x = rmi4_data->sensor_max_x - x;
+ if (rmi4_data->board->y_flip)
+ y = rmi4_data->sensor_max_y - y;
+
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Finger %d:\n"
+ "status = 0x%02x\n"
+ "x = %d\n"
+ "y = %d\n"
+ "wx = %d\n"
+ "wy = %d\n",
+ __func__, finger,
+ finger_status,
+ x, y, wx, wy);
+
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOUCH, 1);
+ input_report_key(rmi4_data->input_dev,
+ BTN_TOOL_FINGER, 1);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_X, x);
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_POSITION_Y, y);
+
+#ifdef REPORT_2D_W
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR, max(wx, wy));
+ input_report_abs(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MINOR, min(wx, wy));
+#endif
+#ifndef TYPE_B_PROTOCOL
+ input_mt_sync(rmi4_data->input_dev);
+#endif
+ touch_count++;
+ }
+ }
+
+#ifndef TYPE_B_PROTOCOL
+ if (!touch_count)
+ input_mt_sync(rmi4_data->input_dev);
+#else
+ /* sync after groups of events */
+ #ifdef KERNEL_ABOVE_3_7
+ input_mt_sync_frame(rmi4_data->input_dev);
+ #endif
+#endif
+
+ input_sync(rmi4_data->input_dev);
+
+ return touch_count;
+}
+
+static void synaptics_rmi4_f1a_report(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ unsigned char button;
+ unsigned char index;
+ unsigned char shift;
+ unsigned char status;
+ unsigned char *data;
+ unsigned short data_addr = fhandler->full_addr.data_base;
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+ static unsigned char do_once = 1;
+ static bool current_status[MAX_NUMBER_OF_BUTTONS];
+#ifdef NO_0D_WHILE_2D
+ static bool before_2d_status[MAX_NUMBER_OF_BUTTONS];
+ static bool while_2d_status[MAX_NUMBER_OF_BUTTONS];
+#endif
+
+ if (do_once) {
+ memset(current_status, 0, sizeof(current_status));
+#ifdef NO_0D_WHILE_2D
+ memset(before_2d_status, 0, sizeof(before_2d_status));
+ memset(while_2d_status, 0, sizeof(while_2d_status));
+#endif
+ do_once = 0;
+ }
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ data_addr,
+ f1a->button_data_buffer,
+ f1a->button_bitmask_size);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to read button data registers\n",
+ __func__);
+ return;
+ }
+
+ data = f1a->button_data_buffer;
+
+ for (button = 0; button < f1a->valid_button_count; button++) {
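+		/* Button states are packed one bit per button, eight buttons per byte */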
+ index = button / 8;
+ shift = button % 8;
+ status = ((data[index] >> shift) & MASK_1BIT);
+
+ if (current_status[button] == status)
+ continue;
+ else
+ current_status[button] = status;
+
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Button %d (code %d) ->%d\n",
+ __func__, button,
+ f1a->button_map[button],
+ status);
+#ifdef NO_0D_WHILE_2D
+ if (rmi4_data->fingers_on_2d == false) {
+ if (status == 1) {
+ before_2d_status[button] = 1;
+ } else {
+ if (while_2d_status[button] == 1) {
+ while_2d_status[button] = 0;
+ continue;
+ } else {
+ before_2d_status[button] = 0;
+ }
+ }
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (before_2d_status[button] == 1) {
+ before_2d_status[button] = 0;
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+ } else {
+ if (status == 1)
+ while_2d_status[button] = 1;
+ else
+ while_2d_status[button] = 0;
+ }
+ }
+#else
+ input_report_key(rmi4_data->input_dev,
+ f1a->button_map[button],
+ status);
+#endif
+ }
+
+ input_sync(rmi4_data->input_dev);
+
+ return;
+}
+
+ /**
+ * synaptics_rmi4_report_touch()
+ *
+ * Called by synaptics_rmi4_sensor_report().
+ *
+ * This function calls the appropriate finger data reporting function
+ * based on the function handler it receives and returns the number of
+ * fingers detected.
+ */
+static void synaptics_rmi4_report_touch(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ unsigned char *touch_count)
+{
+ unsigned char touch_count_2d;
+
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Function %02x reporting\n",
+ __func__, fhandler->fn_number);
+
+ switch (fhandler->fn_number) {
+ case SYNAPTICS_RMI4_F11:
+ touch_count_2d = synaptics_rmi4_f11_abs_report(rmi4_data,
+ fhandler);
+
+ *touch_count += touch_count_2d;
+
+ if (touch_count_2d)
+ rmi4_data->fingers_on_2d = true;
+ else
+ rmi4_data->fingers_on_2d = false;
+ break;
+
+ case SYNAPTICS_RMI4_F1A:
+ synaptics_rmi4_f1a_report(rmi4_data, fhandler);
+ break;
+
+ default:
+ break;
+ }
+
+ return;
+}
+
+ /**
+ * synaptics_rmi4_sensor_report()
+ *
+ * Called by synaptics_rmi4_irq().
+ *
+ * This function determines the interrupt source(s) from the sensor
+ * and calls synaptics_rmi4_report_touch() with the appropriate
+ * function handler for each function with valid data inputs.
+ */
+static int synaptics_rmi4_sensor_report(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char touch_count = 0;
+ unsigned char intr[MAX_INTR_REGISTERS];
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_exp_fn *exp_fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ /*
+ * Get interrupt status information from F01 Data1 register to
+ * determine the source(s) that are flagging the interrupt.
+ */
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr + 1,
+ intr,
+ rmi4_data->num_of_intr_regs);
+ if (retval < 0)
+ return retval;
+
+ /*
+ * Traverse the function handler list and service the source(s)
+ * of the interrupt accordingly.
+ */
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->num_of_data_sources) {
+ if (fhandler->intr_mask &
+ intr[fhandler->intr_reg_num]) {
+ synaptics_rmi4_report_touch(rmi4_data,
+ fhandler, &touch_count);
+ }
+ }
+ }
+ }
+
+ mutex_lock(&exp_fn_list_mutex);
+ if (!list_empty(&exp_fn_list)) {
+ list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
+ if (exp_fhandler->inserted &&
+ (exp_fhandler->func_attn != NULL))
+ exp_fhandler->func_attn(rmi4_data, intr[0]);
+ }
+ }
+ mutex_unlock(&exp_fn_list_mutex);
+
+ return touch_count;
+}
+
+ /**
+ * synaptics_rmi4_irq()
+ *
+ * Called by the kernel when an interrupt occurs (when the sensor
+ * asserts the attention irq).
+ *
+ * This function is the ISR thread and handles the acquisition
+ * and the reporting of finger data when the presence of fingers
+ * is detected.
+ */
+static irqreturn_t synaptics_rmi4_irq(int irq, void *data)
+{
+ struct synaptics_rmi4_data *rmi4_data = data;
+
+ synaptics_rmi4_sensor_report(rmi4_data);
+
+ return IRQ_HANDLED;
+}
+
+ /**
+ * synaptics_rmi4_irq_enable()
+ *
+ * Called by synaptics_rmi4_probe() and the power management functions
+ * in this driver and also exported to other expansion Function modules
+ * such as rmi_dev.
+ *
+ * This function handles the enabling and disabling of the attention
+ * irq including the setting up of the ISR thread.
+ */
+static int synaptics_rmi4_irq_enable(struct synaptics_rmi4_data *rmi4_data,
+ bool enable)
+{
+ int retval = 0;
+ unsigned char intr_status;
+ const struct synaptics_rmi4_platform_data *platform_data =
+ rmi4_data->i2c_client->dev.platform_data;
+
+ if (enable) {
+ if (rmi4_data->irq_enabled)
+ return retval;
+
+ /* Clear interrupts first */
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr + 1,
+ &intr_status,
+ rmi4_data->num_of_intr_regs);
+ if (retval < 0)
+ return retval;
+
+ retval = request_threaded_irq(rmi4_data->irq, NULL,
+ synaptics_rmi4_irq, platform_data->irq_flags,
+ DRIVER_NAME, rmi4_data);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create irq thread\n",
+ __func__);
+ return retval;
+ }
+
+ rmi4_data->irq_enabled = true;
+ } else {
+ if (rmi4_data->irq_enabled) {
+ disable_irq(rmi4_data->irq);
+ free_irq(rmi4_data->irq, rmi4_data);
+ rmi4_data->irq_enabled = false;
+ }
+ }
+
+ return retval;
+}
+
+ /**
+ * synaptics_rmi4_f11_init()
+ *
+ * Called by synaptics_rmi4_query_device().
+ *
+ * This function parses information from the Function $11 registers
+ * and determines the number of fingers supported, x and y data ranges,
+ * offset to the associated interrupt status register, interrupt bit
+ * mask, and gathers finger data acquisition capabilities from the query
+ * registers.
+ */
+static int synaptics_rmi4_f11_init(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char intr_offset;
+ unsigned char abs_data_size;
+ unsigned char abs_data_blk_size;
+ unsigned char query[F11_STD_QUERY_LEN];
+ unsigned char control[F11_STD_CTRL_LEN];
+
+ fhandler->fn_number = fd->fn_number;
+ fhandler->num_of_data_sources = fd->intr_src_count;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ fhandler->full_addr.query_base,
+ query,
+ sizeof(query));
+ if (retval < 0)
+ return retval;
+
+ /* Maximum number of fingers supported */
+ if ((query[1] & MASK_3BIT) <= 4)
+ fhandler->num_of_data_points = (query[1] & MASK_3BIT) + 1;
+ else if ((query[1] & MASK_3BIT) == 5)
+ fhandler->num_of_data_points = 10;
+
+ rmi4_data->num_of_fingers = fhandler->num_of_data_points;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ fhandler->full_addr.ctrl_base,
+ control,
+ sizeof(control));
+ if (retval < 0)
+ return retval;
+
+ /* Maximum x and y */
+ rmi4_data->sensor_max_x = ((control[6] & MASK_8BIT) << 0) |
+ ((control[7] & MASK_4BIT) << 8);
+ rmi4_data->sensor_max_y = ((control[8] & MASK_8BIT) << 0) |
+ ((control[9] & MASK_4BIT) << 8);
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Function %02x max x = %d max y = %d\n",
+ __func__, fhandler->fn_number,
+ rmi4_data->sensor_max_x,
+ rmi4_data->sensor_max_y);
+
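+	/* Determine which interrupt status register holds this Function's bits */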
+ fhandler->intr_reg_num = (intr_count + 7) / 8;
+ if (fhandler->intr_reg_num != 0)
+ fhandler->intr_reg_num -= 1;
+
+ /* Set an enable bit for each data source */
+ intr_offset = intr_count % 8;
+ fhandler->intr_mask = 0;
+ for (ii = intr_offset;
+ ii < ((fd->intr_src_count & MASK_3BIT) +
+ intr_offset);
+ ii++)
+ fhandler->intr_mask |= 1 << ii;
+
+ abs_data_size = query[5] & MASK_2BIT;
+ abs_data_blk_size = 3 + (2 * (abs_data_size == 0 ? 1 : 0));
+ fhandler->size_of_data_register_block = abs_data_blk_size;
+
+ return retval;
+}
+
+static int synaptics_rmi4_f1a_alloc_mem(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ int retval;
+ struct synaptics_rmi4_f1a_handle *f1a;
+
+ f1a = kzalloc(sizeof(*f1a), GFP_KERNEL);
+ if (!f1a) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for function handle\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ fhandler->data = (void *)f1a;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ fhandler->full_addr.query_base,
+ f1a->button_query.data,
+ sizeof(f1a->button_query.data));
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to read query registers\n",
+ __func__);
+ return retval;
+ }
+
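+	/* max_button_count is zero-based, so the actual button count is one more */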
+ f1a->button_count = f1a->button_query.max_button_count + 1;
+ f1a->button_bitmask_size = (f1a->button_count + 7) / 8;
+
+ f1a->button_data_buffer = kcalloc(f1a->button_bitmask_size,
+ sizeof(*(f1a->button_data_buffer)), GFP_KERNEL);
+ if (!f1a->button_data_buffer) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for data buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ f1a->button_map = kcalloc(f1a->button_count,
+ sizeof(*(f1a->button_map)), GFP_KERNEL);
+ if (!f1a->button_map) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for button map\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int synaptics_rmi4_capacitance_button_map(
+ struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler)
+{
+ unsigned char ii;
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+ const struct synaptics_rmi4_platform_data *pdata = rmi4_data->board;
+
+ if (!pdata->capacitance_button_map) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: capacitance_button_map is" \
+ "NULL in board file\n",
+ __func__);
+ return -ENODEV;
+ } else if (!pdata->capacitance_button_map->map) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Button map is missing in board file\n",
+ __func__);
+ return -ENODEV;
+ } else {
+ if (pdata->capacitance_button_map->nbuttons !=
+ f1a->button_count) {
+ f1a->valid_button_count = min(f1a->button_count,
+ pdata->capacitance_button_map->nbuttons);
+ } else {
+ f1a->valid_button_count = f1a->button_count;
+ }
+
+ for (ii = 0; ii < f1a->valid_button_count; ii++)
+ f1a->button_map[ii] =
+ pdata->capacitance_button_map->map[ii];
+ }
+
+ return 0;
+}
+
+static void synaptics_rmi4_f1a_kfree(struct synaptics_rmi4_fn *fhandler)
+{
+ struct synaptics_rmi4_f1a_handle *f1a = fhandler->data;
+
+ if (f1a) {
+ kfree(f1a->button_data_buffer);
+ kfree(f1a->button_map);
+ kfree(f1a);
+ fhandler->data = NULL;
+ }
+
+ return;
+}
+
+static int synaptics_rmi4_f1a_init(struct synaptics_rmi4_data *rmi4_data,
+ struct synaptics_rmi4_fn *fhandler,
+ struct synaptics_rmi4_fn_desc *fd,
+ unsigned int intr_count)
+{
+ int retval;
+ unsigned char ii;
+ unsigned short intr_offset;
+
+ fhandler->fn_number = fd->fn_number;
+ fhandler->num_of_data_sources = fd->intr_src_count;
+
+ fhandler->intr_reg_num = (intr_count + 7) / 8;
+ if (fhandler->intr_reg_num != 0)
+ fhandler->intr_reg_num -= 1;
+
+ /* Set an enable bit for each data source */
+ intr_offset = intr_count % 8;
+ fhandler->intr_mask = 0;
+ for (ii = intr_offset;
+ ii < ((fd->intr_src_count & MASK_3BIT) +
+ intr_offset);
+ ii++)
+ fhandler->intr_mask |= 1 << ii;
+
+ retval = synaptics_rmi4_f1a_alloc_mem(rmi4_data, fhandler);
+ if (retval < 0)
+ goto error_exit;
+
+ retval = synaptics_rmi4_capacitance_button_map(rmi4_data, fhandler);
+ if (retval < 0)
+ goto error_exit;
+
+ rmi4_data->button_0d_enabled = 1;
+
+ return 0;
+
+error_exit:
+ synaptics_rmi4_f1a_kfree(fhandler);
+
+ return retval;
+}
+
+static int synaptics_rmi4_alloc_fh(struct synaptics_rmi4_fn **fhandler,
+ struct synaptics_rmi4_fn_desc *rmi_fd, int page_number)
+{
+ *fhandler = kmalloc(sizeof(**fhandler), GFP_KERNEL);
+ if (!(*fhandler))
+ return -ENOMEM;
+
+ (*fhandler)->full_addr.data_base =
+ (rmi_fd->data_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.ctrl_base =
+ (rmi_fd->ctrl_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.cmd_base =
+ (rmi_fd->cmd_base_addr |
+ (page_number << 8));
+ (*fhandler)->full_addr.query_base =
+ (rmi_fd->query_base_addr |
+ (page_number << 8));
+
+ return 0;
+}
+
+
+ /**
+ * synaptics_rmi4_query_device_info()
+ *
+ * Called by synaptics_rmi4_query_device().
+ *
+ */
+static int synaptics_rmi4_query_device_info(
+ struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char f01_query[F01_STD_QUERY_LEN];
+ struct synaptics_rmi4_device_info *rmi = &(rmi4_data->rmi4_mod_info);
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_query_base_addr,
+ f01_query,
+ sizeof(f01_query));
+ if (retval < 0)
+ return retval;
+
+ /* RMI Version 4.0 currently supported */
+ rmi->version_major = 4;
+ rmi->version_minor = 0;
+
+ rmi->manufacturer_id = f01_query[0];
+ rmi->product_props = f01_query[1];
+ rmi->product_info[0] = f01_query[2] & MASK_7BIT;
+ rmi->product_info[1] = f01_query[3] & MASK_7BIT;
+ rmi->date_code[0] = f01_query[4] & MASK_5BIT;
+ rmi->date_code[1] = f01_query[5] & MASK_4BIT;
+ rmi->date_code[2] = f01_query[6] & MASK_5BIT;
+ rmi->tester_id = ((f01_query[7] & MASK_7BIT) << 8) |
+ (f01_query[8] & MASK_7BIT);
+ rmi->serial_number = ((f01_query[9] & MASK_7BIT) << 8) |
+ (f01_query[10] & MASK_7BIT);
+ memcpy(rmi->product_id_string, &f01_query[11], 10);
+
+ if (rmi->manufacturer_id != 1) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Non-Synaptics device found, manufacturer ID = %d\n",
+ __func__, rmi->manufacturer_id);
+ }
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_query_base_addr + F01_BUID_ID_OFFSET,
+ rmi->build_id,
+ sizeof(rmi->build_id));
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to read firmware build id (code %d)\n",
+ __func__, retval);
+ return retval;
+ }
+ return retval;
+}
+
+ /**
+ * synaptics_rmi4_query_device()
+ *
+ * Called by synaptics_rmi4_probe().
+ *
+ * This function scans the page description table, records the offsets
+ * to the register types of Function $01, sets up the function handlers
+ * for Function $11 and Function $1A, determines the number of interrupt
+ * sources from the sensor, adds valid Functions with data inputs to the
+ * Function linked list, parses information from the query registers of
+ * Function $01, and enables the interrupt sources from the valid Functions
+ * with data inputs.
+ */
+static int synaptics_rmi4_query_device(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char page_number;
+ unsigned char intr_count = 0;
+ unsigned char data_sources = 0;
+ unsigned short pdt_entry_addr;
+ unsigned short intr_addr;
+ struct synaptics_rmi4_f01_device_status status;
+ struct synaptics_rmi4_fn_desc rmi_fd;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ INIT_LIST_HEAD(&rmi->support_fn_list);
+
+ /* Scan the page description tables of the pages to service */
+ for (page_number = 0; page_number < PAGES_TO_SERVICE; page_number++) {
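+		/* PDT entries are scanned downward from PDT_START in PDT_ENTRY_SIZE steps */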
+ for (pdt_entry_addr = PDT_START; pdt_entry_addr > PDT_END;
+ pdt_entry_addr -= PDT_ENTRY_SIZE) {
+ pdt_entry_addr |= (page_number << 8);
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ pdt_entry_addr,
+ (unsigned char *)&rmi_fd,
+ sizeof(rmi_fd));
+ if (retval < 0)
+ return retval;
+
+ fhandler = NULL;
+
+ if (rmi_fd.fn_number == 0) {
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Reached end of PDT\n",
+ __func__);
+ break;
+ }
+
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: F%02x found (page %d)\n",
+ __func__, rmi_fd.fn_number,
+ page_number);
+
+ switch (rmi_fd.fn_number) {
+ case SYNAPTICS_RMI4_F01:
+ rmi4_data->f01_query_base_addr =
+ rmi_fd.query_base_addr;
+ rmi4_data->f01_ctrl_base_addr =
+ rmi_fd.ctrl_base_addr;
+ rmi4_data->f01_data_base_addr =
+ rmi_fd.data_base_addr;
+ rmi4_data->f01_cmd_base_addr =
+ rmi_fd.cmd_base_addr;
+
+ retval =
+ synaptics_rmi4_query_device_info(rmi4_data);
+ if (retval < 0)
+ return retval;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_data_base_addr,
+ status.data,
+ sizeof(status.data));
+ if (retval < 0)
+ return retval;
+
+ if (status.flash_prog == 1) {
+ pr_notice("%s: In flash prog mode, status = 0x%02x\n",
+ __func__,
+ status.status_code);
+ goto flash_prog_mode;
+ }
+ break;
+
+ case SYNAPTICS_RMI4_F34:
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi_fd.ctrl_base_addr,
+ rmi->config_id,
+ sizeof(rmi->config_id));
+ if (retval < 0)
+ return retval;
+ break;
+
+ case SYNAPTICS_RMI4_F11:
+ if (rmi_fd.intr_src_count == 0)
+ break;
+
+ retval = synaptics_rmi4_alloc_fh(&fhandler,
+ &rmi_fd, page_number);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi_fd.fn_number);
+ return retval;
+ }
+
+ retval = synaptics_rmi4_f11_init(rmi4_data,
+ fhandler, &rmi_fd, intr_count);
+ if (retval < 0)
+ return retval;
+ break;
+
+ case SYNAPTICS_RMI4_F1A:
+ if (rmi_fd.intr_src_count == 0)
+ break;
+
+ retval = synaptics_rmi4_alloc_fh(&fhandler,
+ &rmi_fd, page_number);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc for F%d\n",
+ __func__,
+ rmi_fd.fn_number);
+ return retval;
+ }
+
+ retval = synaptics_rmi4_f1a_init(rmi4_data,
+ fhandler, &rmi_fd, intr_count);
+ if (retval < 0)
+ return retval;
+ break;
+ }
+
+ /* Accumulate the interrupt count */
+ intr_count += (rmi_fd.intr_src_count & MASK_3BIT);
+
+ if (fhandler && rmi_fd.intr_src_count) {
+ list_add_tail(&fhandler->link,
+ &rmi->support_fn_list);
+ }
+ }
+ }
+
+flash_prog_mode:
+ rmi4_data->num_of_intr_regs = (intr_count + 7) / 8;
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Number of interrupt registers = %d\n",
+ __func__, rmi4_data->num_of_intr_regs);
+
+ memset(rmi4_data->intr_mask, 0x00, sizeof(rmi4_data->intr_mask));
+
+ /*
+ * Map out the interrupt bit masks for the interrupt sources
+ * from the registered function handlers.
+ */
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link)
+ data_sources += fhandler->num_of_data_sources;
+ }
+ if (data_sources) {
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler,
+ &rmi->support_fn_list, link) {
+ if (fhandler->num_of_data_sources) {
+ rmi4_data->intr_mask[fhandler->intr_reg_num] |=
+ fhandler->intr_mask;
+ }
+ }
+ }
+ }
+
+ /* Enable the interrupt sources */
+ for (ii = 0; ii < rmi4_data->num_of_intr_regs; ii++) {
+ if (rmi4_data->intr_mask[ii] != 0x00) {
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Interrupt enable mask %d = 0x%02x\n",
+ __func__, ii, rmi4_data->intr_mask[ii]);
+ intr_addr = rmi4_data->f01_ctrl_base_addr + 1 + ii;
+ retval = synaptics_rmi4_i2c_write(rmi4_data,
+ intr_addr,
+ &(rmi4_data->intr_mask[ii]),
+ sizeof(rmi4_data->intr_mask[ii]));
+ if (retval < 0)
+ return retval;
+ }
+ }
+
+ return 0;
+}
+
+static int synaptics_rmi4_reset_device(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char command = 0x01;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_device_info *rmi;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ retval = synaptics_rmi4_i2c_write(rmi4_data,
+ rmi4_data->f01_cmd_base_addr,
+ &command,
+ sizeof(command));
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to issue reset command, error = %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+ msleep(100);
+
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+ synaptics_rmi4_f1a_kfree(fhandler);
+ else
+ kfree(fhandler->data);
+ kfree(fhandler);
+ }
+ }
+
+ retval = synaptics_rmi4_query_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to query device\n",
+ __func__);
+ return retval;
+ }
+
+ return 0;
+}
+
+/**
+* synaptics_rmi4_detection_work()
+*
+* Called by the kernel at the scheduled time.
+*
+* This function is a self-rearming work thread that checks for the
+* insertion and removal of other expansion Function modules such as
+* rmi_dev and calls their initialization and removal callback functions
+* accordingly.
+*/
+static void synaptics_rmi4_detection_work(struct work_struct *work)
+{
+ struct synaptics_rmi4_exp_fn *exp_fhandler, *next_list_entry;
+ struct synaptics_rmi4_data *rmi4_data =
+ container_of(work, struct synaptics_rmi4_data,
+ det_work.work);
+
+ queue_delayed_work(rmi4_data->det_workqueue,
+ &rmi4_data->det_work,
+ msecs_to_jiffies(EXP_FN_DET_INTERVAL));
+
+ mutex_lock(&exp_fn_list_mutex);
+ if (!list_empty(&exp_fn_list)) {
+ list_for_each_entry_safe(exp_fhandler,
+ next_list_entry,
+ &exp_fn_list,
+ link) {
+ if ((exp_fhandler->func_init != NULL) &&
+ (exp_fhandler->inserted == false)) {
+ exp_fhandler->func_init(rmi4_data);
+ exp_fhandler->inserted = true;
+ } else if ((exp_fhandler->func_init == NULL) &&
+ (exp_fhandler->inserted == true)) {
+ exp_fhandler->func_remove(rmi4_data);
+ list_del(&exp_fhandler->link);
+ kfree(exp_fhandler);
+ }
+ }
+ }
+ mutex_unlock(&exp_fn_list_mutex);
+
+ return;
+}
+
+/**
+* synaptics_rmi4_new_function()
+*
+* Called by other expansion Function modules in their module init and
+* module exit functions.
+*
+* This function is used by other expansion Function modules such as
+* rmi_dev to register themselves with the driver by providing their
+* initialization and removal callback function pointers so that they
+* can be inserted or removed dynamically at module init and exit times,
+* respectively.
+*/
+void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert,
+ int (*func_init)(struct synaptics_rmi4_data *rmi4_data),
+ void (*func_remove)(struct synaptics_rmi4_data *rmi4_data),
+ void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask))
+{
+ struct synaptics_rmi4_exp_fn *exp_fhandler;
+
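+	/* Lazily initialize the expansion function list on first use */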
+ if (!exp_fn_inited) {
+ mutex_init(&exp_fn_list_mutex);
+ INIT_LIST_HEAD(&exp_fn_list);
+ exp_fn_inited = 1;
+ }
+
+ mutex_lock(&exp_fn_list_mutex);
+ if (insert) {
+ exp_fhandler = kzalloc(sizeof(*exp_fhandler), GFP_KERNEL);
+ if (!exp_fhandler) {
+ pr_err("%s: Failed to alloc mem for expansion function\n",
+ __func__);
+ goto exit;
+ }
+ exp_fhandler->fn_type = fn_type;
+ exp_fhandler->func_init = func_init;
+ exp_fhandler->func_attn = func_attn;
+ exp_fhandler->func_remove = func_remove;
+ exp_fhandler->inserted = false;
+ list_add_tail(&exp_fhandler->link, &exp_fn_list);
+ } else {
+ if (!list_empty(&exp_fn_list)) {
+ list_for_each_entry(exp_fhandler, &exp_fn_list, link) {
+ if (exp_fhandler->func_init == func_init) {
+ exp_fhandler->inserted = false;
+ exp_fhandler->func_init = NULL;
+ exp_fhandler->func_attn = NULL;
+ goto exit;
+ }
+ }
+ }
+ }
+
+exit:
+ mutex_unlock(&exp_fn_list_mutex);
+
+ return;
+}
+EXPORT_SYMBOL(synaptics_rmi4_new_function);
+
+ /**
+ * synaptics_rmi4_probe()
+ *
+ * Called by the kernel when an association with an I2C device of the
+ * same name is made (after doing i2c_add_driver).
+ *
+ * This function allocates and initializes the resources for the driver
+ * as an input driver, turns on the power to the sensor, queries the
+ * sensor for its supported Functions and characteristics, registers
+ * the driver to the input subsystem, sets up the interrupt, handles
+ * the registration of the early_suspend and late_resume functions,
+ * and creates a work queue for detection of other expansion Function
+ * modules.
+ */
+static int __devinit synaptics_rmi4_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int retval;
+ unsigned char ii;
+ unsigned char attr_count;
+ struct synaptics_rmi4_f1a_handle *f1a;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_data *rmi4_data;
+ struct synaptics_rmi4_device_info *rmi;
+ const struct synaptics_rmi4_platform_data *platform_data =
+ client->dev.platform_data;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev,
+ "%s: SMBus byte data not supported\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (!platform_data) {
+ dev_err(&client->dev,
+ "%s: No platform data found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ rmi4_data = kzalloc(sizeof(*rmi4_data) * 2, GFP_KERNEL);
+ if (!rmi4_data) {
+ dev_err(&client->dev,
+ "%s: Failed to alloc mem for rmi4_data\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ rmi4_data->input_dev = input_allocate_device();
+ if (rmi4_data->input_dev == NULL) {
+ dev_err(&client->dev,
+ "%s: Failed to allocate input device\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_input_device;
+ }
+
+ if (platform_data->regulator_en) {
+ rmi4_data->regulator = regulator_get(&client->dev, "vdd");
+ if (IS_ERR(rmi4_data->regulator)) {
+ dev_err(&client->dev,
+ "%s: Failed to get regulator\n",
+ __func__);
+ retval = PTR_ERR(rmi4_data->regulator);
+ goto err_regulator;
+ }
+ regulator_enable(rmi4_data->regulator);
+ }
+
+ rmi4_data->i2c_client = client;
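+	/* Invalidate the cached page so the first register access forces a page select */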
+ rmi4_data->current_page = MASK_8BIT;
+ rmi4_data->board = platform_data;
+ rmi4_data->touch_stopped = false;
+ rmi4_data->sensor_sleep = false;
+ rmi4_data->irq_enabled = false;
+
+ rmi4_data->i2c_read = synaptics_rmi4_i2c_read;
+ rmi4_data->i2c_write = synaptics_rmi4_i2c_write;
+ rmi4_data->irq_enable = synaptics_rmi4_irq_enable;
+ rmi4_data->reset_device = synaptics_rmi4_reset_device;
+
+ init_waitqueue_head(&rmi4_data->wait);
+ mutex_init(&(rmi4_data->rmi4_io_ctrl_mutex));
+
+ retval = synaptics_rmi4_query_device(rmi4_data);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to query device\n",
+ __func__);
+ goto err_query_device;
+ }
+
+ i2c_set_clientdata(client, rmi4_data);
+
+ rmi4_data->input_dev->name = DRIVER_NAME;
+ rmi4_data->input_dev->phys = INPUT_PHYS_NAME;
+ rmi4_data->input_dev->id.bustype = BUS_I2C;
+ rmi4_data->input_dev->id.product = SYNAPTICS_RMI4_DRIVER_PRODUCT;
+ rmi4_data->input_dev->id.version = SYNAPTICS_RMI4_DRIVER_VERSION;
+ rmi4_data->input_dev->dev.parent = &client->dev;
+ input_set_drvdata(rmi4_data->input_dev, rmi4_data);
+
+ set_bit(EV_SYN, rmi4_data->input_dev->evbit);
+ set_bit(EV_KEY, rmi4_data->input_dev->evbit);
+ set_bit(EV_ABS, rmi4_data->input_dev->evbit);
+ set_bit(BTN_TOUCH, rmi4_data->input_dev->keybit);
+ set_bit(BTN_TOOL_FINGER, rmi4_data->input_dev->keybit);
+
+#ifdef INPUT_PROP_DIRECT
+ set_bit(INPUT_PROP_DIRECT, rmi4_data->input_dev->propbit);
+#endif
+
+ input_set_abs_params(rmi4_data->input_dev,
+ ABS_MT_POSITION_X, 0,
+ rmi4_data->sensor_max_x, 0, 0);
+ input_set_abs_params(rmi4_data->input_dev,
+ ABS_MT_POSITION_Y, 0,
+ rmi4_data->sensor_max_y, 0, 0);
+#ifdef REPORT_2D_W
+ input_set_abs_params(rmi4_data->input_dev,
+ ABS_MT_TOUCH_MAJOR, 0,
+ MAX_ABS_MT_TOUCH_MAJOR, 0, 0);
+#endif
+
+#ifdef TYPE_B_PROTOCOL
+ input_mt_init_slots(rmi4_data->input_dev,
+ rmi4_data->num_of_fingers);
+#endif
+
+ f1a = NULL;
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+ f1a = fhandler->data;
+ }
+ }
+
+ if (f1a) {
+ for (ii = 0; ii < f1a->valid_button_count; ii++) {
+ set_bit(f1a->button_map[ii],
+ rmi4_data->input_dev->keybit);
+ input_set_capability(rmi4_data->input_dev,
+ EV_KEY, f1a->button_map[ii]);
+ }
+ }
+
+ retval = input_register_device(rmi4_data->input_dev);
+ if (retval) {
+ dev_err(&client->dev,
+ "%s: Failed to register input device\n",
+ __func__);
+ goto err_register_input;
+ }
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ rmi4_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
+ rmi4_data->early_suspend.suspend = synaptics_rmi4_early_suspend;
+ rmi4_data->early_suspend.resume = synaptics_rmi4_late_resume;
+ register_early_suspend(&rmi4_data->early_suspend);
+#endif
+
+ if (!exp_fn_inited) {
+ mutex_init(&exp_fn_list_mutex);
+ INIT_LIST_HEAD(&exp_fn_list);
+ exp_fn_inited = 1;
+ }
+
+ rmi4_data->det_workqueue =
+ create_singlethread_workqueue("rmi_det_workqueue");
+ INIT_DELAYED_WORK(&rmi4_data->det_work,
+ synaptics_rmi4_detection_work);
+ queue_delayed_work(rmi4_data->det_workqueue,
+ &rmi4_data->det_work,
+ msecs_to_jiffies(EXP_FN_DET_INTERVAL));
+
+ if (platform_data->gpio_config) {
+ retval = platform_data->gpio_config(platform_data->irq_gpio,
+ true);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to configure GPIO\n",
+ __func__);
+ goto err_gpio;
+ }
+ }
+
+ rmi4_data->irq = gpio_to_irq(platform_data->irq_gpio);
+
+ retval = synaptics_rmi4_irq_enable(rmi4_data, true);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to enable attention interrupt\n",
+ __func__);
+ goto err_enable_irq;
+ }
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ if (retval < 0) {
+ dev_err(&client->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ goto err_sysfs;
+ }
+ }
+
+ return retval;
+
+err_sysfs:
+	while (attr_count > 0) {
+		attr_count--;
+		sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+				&attrs[attr_count].attr);
+	}
+
+err_enable_irq:
+err_gpio:
+ input_unregister_device(rmi4_data->input_dev);
+
+err_register_input:
+err_query_device:
+ if (platform_data->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+ synaptics_rmi4_f1a_kfree(fhandler);
+ else
+ kfree(fhandler->data);
+ kfree(fhandler);
+ }
+ }
+
+err_regulator:
+ input_free_device(rmi4_data->input_dev);
+ rmi4_data->input_dev = NULL;
+
+err_input_device:
+ kfree(rmi4_data);
+
+ return retval;
+}
+
+ /**
+ * synaptics_rmi4_remove()
+ *
+ * Called by the kernel when the association with an I2C device of the
+ * same name is broken (when the driver is unloaded).
+ *
+ * This function terminates the work queue, stops sensor data acquisition,
+ * frees the interrupt, unregisters the driver from the input subsystem,
+ * turns off the power to the sensor, and frees other allocated resources.
+ */
+static int __devexit synaptics_rmi4_remove(struct i2c_client *client)
+{
+ unsigned char attr_count;
+ struct synaptics_rmi4_fn *fhandler;
+ struct synaptics_rmi4_data *rmi4_data = i2c_get_clientdata(client);
+ struct synaptics_rmi4_device_info *rmi;
+ const struct synaptics_rmi4_platform_data *platform_data =
+ rmi4_data->board;
+
+ rmi = &(rmi4_data->rmi4_mod_info);
+
+ cancel_delayed_work_sync(&rmi4_data->det_work);
+ flush_workqueue(rmi4_data->det_workqueue);
+ destroy_workqueue(rmi4_data->det_workqueue);
+
+ rmi4_data->touch_stopped = true;
+ wake_up(&rmi4_data->wait);
+
+ synaptics_rmi4_irq_enable(rmi4_data, false);
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ sysfs_remove_file(&rmi4_data->input_dev->dev.kobj,
+ &attrs[attr_count].attr);
+ }
+
+ input_unregister_device(rmi4_data->input_dev);
+
+ if (platform_data->regulator_en) {
+ regulator_disable(rmi4_data->regulator);
+ regulator_put(rmi4_data->regulator);
+ }
+
+ if (!list_empty(&rmi->support_fn_list)) {
+ list_for_each_entry(fhandler, &rmi->support_fn_list, link) {
+ if (fhandler->fn_number == SYNAPTICS_RMI4_F1A)
+ synaptics_rmi4_f1a_kfree(fhandler);
+ else
+ kfree(fhandler->data);
+ kfree(fhandler);
+ }
+ }
+ input_free_device(rmi4_data->input_dev);
+
+ kfree(rmi4_data);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+ /**
+ * synaptics_rmi4_sensor_sleep()
+ *
+ * Called by synaptics_rmi4_early_suspend() and synaptics_rmi4_suspend().
+ *
+ * This function stops finger data acquisition and puts the sensor to sleep.
+ */
+static void synaptics_rmi4_sensor_sleep(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char device_ctrl;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(&(rmi4_data->input_dev->dev),
+ "%s: Failed to enter sleep mode\n",
+ __func__);
+ rmi4_data->sensor_sleep = false;
+ return;
+ }
+
+ device_ctrl = (device_ctrl & ~MASK_3BIT);
+ device_ctrl = (device_ctrl | NO_SLEEP_OFF | SENSOR_SLEEP);
+
+ retval = synaptics_rmi4_i2c_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(&(rmi4_data->input_dev->dev),
+ "%s: Failed to enter sleep mode\n",
+ __func__);
+ rmi4_data->sensor_sleep = false;
+ return;
+ } else {
+ rmi4_data->sensor_sleep = true;
+ }
+
+ return;
+}
+
+ /**
+ * synaptics_rmi4_sensor_wake()
+ *
+ * Called by synaptics_rmi4_resume() and synaptics_rmi4_late_resume().
+ *
+ * This function wakes the sensor from sleep.
+ */
+static void synaptics_rmi4_sensor_wake(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ unsigned char device_ctrl;
+
+ retval = synaptics_rmi4_i2c_read(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(&(rmi4_data->input_dev->dev),
+ "%s: Failed to wake from sleep mode\n",
+ __func__);
+ rmi4_data->sensor_sleep = true;
+ return;
+ }
+
+ device_ctrl = (device_ctrl & ~MASK_3BIT);
+ device_ctrl = (device_ctrl | NO_SLEEP_OFF | NORMAL_OPERATION);
+
+ retval = synaptics_rmi4_i2c_write(rmi4_data,
+ rmi4_data->f01_ctrl_base_addr,
+ &device_ctrl,
+ sizeof(device_ctrl));
+ if (retval < 0) {
+ dev_err(&(rmi4_data->input_dev->dev),
+ "%s: Failed to wake from sleep mode\n",
+ __func__);
+ rmi4_data->sensor_sleep = true;
+ return;
+ } else {
+ rmi4_data->sensor_sleep = false;
+ }
+
+ return;
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ /**
+ * synaptics_rmi4_early_suspend()
+ *
+ * Called by the kernel during the early suspend phase when the system
+ * enters suspend.
+ *
+ * This function calls synaptics_rmi4_sensor_sleep() to stop finger
+ * data acquisition and put the sensor to sleep.
+ */
+static void synaptics_rmi4_early_suspend(struct early_suspend *h)
+{
+ struct synaptics_rmi4_data *rmi4_data =
+ container_of(h, struct synaptics_rmi4_data,
+ early_suspend);
+
+ rmi4_data->touch_stopped = true;
+ wake_up(&rmi4_data->wait);
+ synaptics_rmi4_irq_enable(rmi4_data, false);
+ synaptics_rmi4_sensor_sleep(rmi4_data);
+
+ if (rmi4_data->full_pm_cycle)
+ synaptics_rmi4_suspend(&(rmi4_data->input_dev->dev));
+
+ return;
+}
+
+ /**
+ * synaptics_rmi4_late_resume()
+ *
+ * Called by the kernel during the late resume phase when the system
+ * wakes up from suspend.
+ *
+ * This function goes through the sensor wake process if the system wakes
+ * up from early suspend (without going into suspend).
+ */
+static void synaptics_rmi4_late_resume(struct early_suspend *h)
+{
+ struct synaptics_rmi4_data *rmi4_data =
+ container_of(h, struct synaptics_rmi4_data,
+ early_suspend);
+
+ if (rmi4_data->full_pm_cycle)
+ synaptics_rmi4_resume(&(rmi4_data->input_dev->dev));
+
+ if (rmi4_data->sensor_sleep == true) {
+ synaptics_rmi4_sensor_wake(rmi4_data);
+ rmi4_data->touch_stopped = false;
+ synaptics_rmi4_irq_enable(rmi4_data, true);
+ }
+
+ return;
+}
+#endif
+
+ /**
+ * synaptics_rmi4_suspend()
+ *
+ * Called by the kernel during the suspend phase when the system
+ * enters suspend.
+ *
+ * This function stops finger data acquisition and puts the sensor to
+ * sleep (if not already done so during the early suspend phase),
+ * disables the interrupt, and turns off the power to the sensor.
+ */
+static int synaptics_rmi4_suspend(struct device *dev)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ const struct synaptics_rmi4_platform_data *platform_data =
+ rmi4_data->board;
+
+ if (!rmi4_data->sensor_sleep) {
+ rmi4_data->touch_stopped = true;
+ wake_up(&rmi4_data->wait);
+ synaptics_rmi4_irq_enable(rmi4_data, false);
+ synaptics_rmi4_sensor_sleep(rmi4_data);
+ }
+
+ if (platform_data->regulator_en)
+ regulator_disable(rmi4_data->regulator);
+
+ return 0;
+}
+
+ /**
+ * synaptics_rmi4_resume()
+ *
+ * Called by the kernel during the resume phase when the system
+ * wakes up from suspend.
+ *
+ * This function turns on the power to the sensor, wakes the sensor
+ * from sleep, enables the interrupt, and starts finger data
+ * acquisition.
+ */
+static int synaptics_rmi4_resume(struct device *dev)
+{
+ struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
+ const struct synaptics_rmi4_platform_data *platform_data =
+ rmi4_data->board;
+
+ if (platform_data->regulator_en)
+ regulator_enable(rmi4_data->regulator);
+
+ synaptics_rmi4_sensor_wake(rmi4_data);
+ rmi4_data->touch_stopped = false;
+ synaptics_rmi4_irq_enable(rmi4_data, true);
+
+ return 0;
+}
+
+static const struct dev_pm_ops synaptics_rmi4_dev_pm_ops = {
+ .suspend = synaptics_rmi4_suspend,
+ .resume = synaptics_rmi4_resume,
+};
+#endif
+
+static const struct i2c_device_id synaptics_rmi4_id_table[] = {
+ {DRIVER_NAME, 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, synaptics_rmi4_id_table);
+
+static struct i2c_driver synaptics_rmi4_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &synaptics_rmi4_dev_pm_ops,
+#endif
+ },
+ .probe = synaptics_rmi4_probe,
+ .remove = __devexit_p(synaptics_rmi4_remove),
+ .id_table = synaptics_rmi4_id_table,
+};
+
+ /**
+ * synaptics_rmi4_init()
+ *
+ * Called by the kernel during do_initcalls (if built-in)
+ * or when the driver is loaded (if a module).
+ *
+ * This function registers the driver to the I2C subsystem.
+ *
+ */
+static int __init synaptics_rmi4_init(void)
+{
+ return i2c_add_driver(&synaptics_rmi4_driver);
+}
+
+ /**
+ * synaptics_rmi4_exit()
+ *
+ * Called by the kernel when the driver is unloaded.
+ *
+ * This function unregisters the driver from the I2C subsystem.
+ *
+ */
+static void __exit synaptics_rmi4_exit(void)
+{
+ i2c_del_driver(&synaptics_rmi4_driver);
+}
+
+module_init(synaptics_rmi4_init);
+module_exit(synaptics_rmi4_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("Synaptics RMI4 I2C Touch Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION);
diff --git a/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.h b/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.h
new file mode 100644
index 000000000000..ecb9b9415e8a
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/synaptics_i2c_rmi4.h
@@ -0,0 +1,286 @@
+/*
+ * Synaptics RMI4 touchscreen driver
+ *
+ * Copyright (C) 2012 Synaptics Incorporated
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SYNAPTICS_DSX_RMI4_H_
+#define _SYNAPTICS_DSX_RMI4_H_
+
+#define SYNAPTICS_RMI4_DS4 0x0001
+#define SYNAPTICS_RMI4_DS5 0x0002
+#define SYNAPTICS_RMI4_DRIVER_PRODUCT SYNAPTICS_RMI4_DS4
+#define SYNAPTICS_RMI4_DRIVER_VERSION 0x1001
+
+#include <linux/version.h>
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#define KERNEL_ABOVE_2_6_38
+#endif
+
+#ifdef KERNEL_ABOVE_2_6_38
+#define sstrtoul(...) kstrtoul(__VA_ARGS__)
+#else
+#define sstrtoul(...) strict_strtoul(__VA_ARGS__)
+#endif
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 7, 0))
+#define KERNEL_ABOVE_3_7
+#endif
+
+#define PDT_PROPS (0x00EF)
+#define PDT_START (0x00E9)
+#define PDT_END (0x000A)
+#define PDT_ENTRY_SIZE (0x0006)
+#define PAGES_TO_SERVICE (10)
+#define PAGE_SELECT_LEN (2)
+
+#define SYNAPTICS_RMI4_F01 (0x01)
+#define SYNAPTICS_RMI4_F11 (0x11)
+#define SYNAPTICS_RMI4_F1A (0x1a)
+#define SYNAPTICS_RMI4_F34 (0x34)
+#define SYNAPTICS_RMI4_F54 (0x54)
+#define SYNAPTICS_RMI4_F55 (0x55)
+
+#define SYNAPTICS_RMI4_PRODUCT_INFO_SIZE 2
+#define SYNAPTICS_RMI4_DATE_CODE_SIZE 3
+#define SYNAPTICS_RMI4_PRODUCT_ID_SIZE 10
+#define SYNAPTICS_RMI4_BUILD_ID_SIZE 3
+
+#define MAX_NUMBER_OF_FINGERS 10
+#define MAX_NUMBER_OF_BUTTONS 4
+#define MAX_INTR_REGISTERS 4
+
+#define MASK_16BIT 0xFFFF
+#define MASK_8BIT 0xFF
+#define MASK_7BIT 0x7F
+#define MASK_6BIT 0x3F
+#define MASK_5BIT 0x1F
+#define MASK_4BIT 0x0F
+#define MASK_3BIT 0x07
+#define MASK_2BIT 0x03
+#define MASK_1BIT 0x01
+
+/*
+ * struct synaptics_rmi4_fn_desc - function descriptor fields in PDT
+ * @query_base_addr: base address for query registers
+ * @cmd_base_addr: base address for command registers
+ * @ctrl_base_addr: base address for control registers
+ * @data_base_addr: base address for data registers
+ * @intr_src_count: number of interrupt sources
+ * @fn_number: function number
+ */
+struct synaptics_rmi4_fn_desc {
+ unsigned char query_base_addr;
+ unsigned char cmd_base_addr;
+ unsigned char ctrl_base_addr;
+ unsigned char data_base_addr;
+ unsigned char intr_src_count;
+ unsigned char fn_number;
+};
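The PDT constants above (PDT_START, PDT_END, PDT_ENTRY_SIZE) and this descriptor define how functions are discovered: 6-byte entries are read downward from PDT_START until an empty entry (fn_number == 0) is reached. A minimal sketch of that walk on page 0, assuming a read helper with the (data, addr, buf, len) signature declared later in this header; paging, includes, and most error handling are elided:

static int example_scan_pdt_page0(struct synaptics_rmi4_data *rmi4_data,
		int (*read)(struct synaptics_rmi4_data *, unsigned short,
				unsigned char *, unsigned short))
{
	struct synaptics_rmi4_fn_desc desc;
	unsigned short addr;

	for (addr = PDT_START; addr > PDT_END; addr -= PDT_ENTRY_SIZE) {
		if (read(rmi4_data, addr, (unsigned char *)&desc,
				sizeof(desc)) < 0)
			return -EIO;
		if (desc.fn_number == 0)
			break;	/* empty slot marks the end of the table */
		pr_info("found F%02X, %d interrupt source(s)\n",
				desc.fn_number,
				desc.intr_src_count & MASK_3BIT);
	}

	return 0;
}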
+
+/*
+ * struct synaptics_rmi4_fn_full_addr - full 16-bit base addresses
+ * @query_base: 16-bit base address for query registers
+ * @cmd_base: 16-bit base address for command registers
+ * @ctrl_base: 16-bit base address for control registers
+ * @data_base: 16-bit base address for data registers
+ */
+struct synaptics_rmi4_fn_full_addr {
+ unsigned short query_base;
+ unsigned short cmd_base;
+ unsigned short ctrl_base;
+ unsigned short data_base;
+};
+
+/*
+ * struct synaptics_rmi4_fn - function handler data structure
+ * @fn_number: function number
+ * @num_of_data_sources: number of data sources
+ * @num_of_data_points: maximum number of fingers supported
+ * @size_of_data_register_block: data register block size
+ * @data1_offset: offset to data1 register from data base address
+ * @intr_reg_num: index to associated interrupt register
+ * @intr_mask: interrupt mask
+ * @full_addr: full 16-bit base addresses of function registers
+ * @link: linked list for function handlers
+ * @data_size: size of private data
+ * @data: pointer to private data
+ */
+struct synaptics_rmi4_fn {
+ unsigned char fn_number;
+ unsigned char num_of_data_sources;
+ unsigned char num_of_data_points;
+ unsigned char size_of_data_register_block;
+ unsigned char data1_offset;
+ unsigned char intr_reg_num;
+ unsigned char intr_mask;
+ struct synaptics_rmi4_fn_full_addr full_addr;
+ struct list_head link;
+ int data_size;
+ void *data;
+};
+
+/*
+ * struct synaptics_rmi4_device_info - device information
+ * @version_major: rmi protocol major version number
+ * @version_minor: rmi protocol minor version number
+ * @manufacturer_id: manufacturer id
+ * @product_props: product properties information
+ * @product_info: product info array
+ * @date_code: device manufacture date
+ * @tester_id: tester id array
+ * @serial_number: device serial number
+ * @product_id_string: device product id
+ * @support_fn_list: linked list for function handlers
+ */
+struct synaptics_rmi4_device_info {
+ unsigned int version_major;
+ unsigned int version_minor;
+ unsigned char manufacturer_id;
+ unsigned char product_props;
+ unsigned char product_info[SYNAPTICS_RMI4_PRODUCT_INFO_SIZE];
+ unsigned char date_code[SYNAPTICS_RMI4_DATE_CODE_SIZE];
+ unsigned short tester_id;
+ unsigned short serial_number;
+ unsigned char product_id_string[SYNAPTICS_RMI4_PRODUCT_ID_SIZE + 1];
+ unsigned char build_id[SYNAPTICS_RMI4_BUILD_ID_SIZE];
+ unsigned char config_id[3];
+ struct list_head support_fn_list;
+};
+
+/*
+ * struct synaptics_rmi4_data - rmi4 device instance data
+ * @i2c_client: pointer to associated i2c client
+ * @input_dev: pointer to associated input device
+ * @board: constant pointer to platform data
+ * @rmi4_mod_info: device information
+ * @regulator: pointer to associated regulator
+ * @rmi4_io_ctrl_mutex: mutex for i2c i/o control
+ * @det_work: work thread instance for expansion function detection
+ * @det_workqueue: pointer to work queue for work thread instance
+ * @early_suspend: instance to support early suspend power management
+ * @current_page: current page in sensor to access
+ * @button_0d_enabled: flag for 0d button support
+ * @full_pm_cycle: flag for full power management cycle in early suspend stage
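+ * @num_of_rx: number of receiver (rx) electrodes
+ * @num_of_tx: number of transmitter (tx) electrodes
+ * @num_of_fingers: maximum number of fingers supported
+ * @intr_mask: per-register interrupt enable masks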
+ * @num_of_intr_regs: number of interrupt registers
+ * @f01_query_base_addr: query base address for f01
+ * @f01_cmd_base_addr: command base address for f01
+ * @f01_ctrl_base_addr: control base address for f01
+ * @f01_data_base_addr: data base address for f01
+ * @irq: attention interrupt
+ * @sensor_max_x: sensor maximum x value
+ * @sensor_max_y: sensor maximum y value
+ * @irq_enabled: flag for indicating interrupt enable status
+ * @touch_stopped: flag to stop interrupt thread processing
+ * @fingers_on_2d: flag to indicate presence of fingers in 2d area
+ * @sensor_sleep: flag to indicate sleep state of sensor
+ * @wait: wait queue for touch data polling in interrupt thread
+ * @i2c_read: pointer to i2c read function
+ * @i2c_write: pointer to i2c write function
+ * @irq_enable: pointer to irq enable function
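+ * @reset_device: pointer to device reset function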
+ */
+struct synaptics_rmi4_data {
+ struct i2c_client *i2c_client;
+ struct input_dev *input_dev;
+ const struct synaptics_rmi4_platform_data *board;
+ struct synaptics_rmi4_device_info rmi4_mod_info;
+ struct regulator *regulator;
+ struct mutex rmi4_io_ctrl_mutex;
+ struct delayed_work det_work;
+ struct workqueue_struct *det_workqueue;
+ struct early_suspend early_suspend;
+ unsigned char current_page;
+ unsigned char button_0d_enabled;
+ unsigned char full_pm_cycle;
+ unsigned char num_of_rx;
+ unsigned char num_of_tx;
+ unsigned char num_of_fingers;
+ unsigned char intr_mask[MAX_INTR_REGISTERS];
+ unsigned short num_of_intr_regs;
+ unsigned short f01_query_base_addr;
+ unsigned short f01_cmd_base_addr;
+ unsigned short f01_ctrl_base_addr;
+ unsigned short f01_data_base_addr;
+ int irq;
+ int sensor_max_x;
+ int sensor_max_y;
+ bool irq_enabled;
+ bool touch_stopped;
+ bool fingers_on_2d;
+ bool sensor_sleep;
+ wait_queue_head_t wait;
+ int (*i2c_read)(struct synaptics_rmi4_data *pdata, unsigned short addr,
+ unsigned char *data, unsigned short length);
+ int (*i2c_write)(struct synaptics_rmi4_data *pdata, unsigned short addr,
+ unsigned char *data, unsigned short length);
+ int (*irq_enable)(struct synaptics_rmi4_data *rmi4_data, bool enable);
+ int (*reset_device)(struct synaptics_rmi4_data *rmi4_data);
+};
+
+enum exp_fn {
+ RMI_DEV = 0,
+ RMI_F34,
+ RMI_F54,
+ RMI_FW_UPDATER,
+ RMI_LAST,
+};
+
+struct synaptics_rmi4_exp_fn_ptr {
+ int (*read)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+ unsigned char *data, unsigned short length);
+ int (*write)(struct synaptics_rmi4_data *rmi4_data, unsigned short addr,
+ unsigned char *data, unsigned short length);
+ int (*enable)(struct synaptics_rmi4_data *rmi4_data, bool enable);
+};
+
+void synaptics_rmi4_new_function(enum exp_fn fn_type, bool insert,
+ int (*func_init)(struct synaptics_rmi4_data *rmi4_data),
+ void (*func_remove)(struct synaptics_rmi4_data *rmi4_data),
+ void (*func_attn)(struct synaptics_rmi4_data *rmi4_data,
+ unsigned char intr_mask));
+
+static inline ssize_t synaptics_rmi4_show_error(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ dev_warn(dev, "%s Attempted to read from write-only attribute %s\n",
+ __func__, attr->attr.name);
+ return -EPERM;
+}
+
+static inline ssize_t synaptics_rmi4_store_error(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ dev_warn(dev, "%s Attempted to write to read-only attribute %s\n",
+ __func__, attr->attr.name);
+ return -EPERM;
+}
+
+static inline void batohs(unsigned short *dest, unsigned char *src)
+{
+ *dest = src[1] * 0x100 + src[0];
+}
+
+static inline void hstoba(unsigned char *dest, unsigned short src)
+{
+ dest[0] = src % 0x100;
+ dest[1] = src / 0x100;
+}
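These two helpers convert between a two-byte, low-byte-first register image and a host unsigned short. A quick worked example (illustrative only):

	unsigned char raw[2] = {0x34, 0x12};
	unsigned short val;

	batohs(&val, raw);	/* val == 0x1234 */
	hstoba(raw, 0xBEEF);	/* raw[0] == 0xEF, raw[1] == 0xBE */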
+
+#endif
diff --git a/kernel/drivers/input/touchscreen/synaptics_rmi_dev.c b/kernel/drivers/input/touchscreen/synaptics_rmi_dev.c
new file mode 100644
index 000000000000..75857802c97a
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/synaptics_rmi_dev.c
@@ -0,0 +1,710 @@
+/*
+ * Synaptics RMI4 touchscreen driver
+ *
+ * Copyright (C) 2012 Synaptics Incorporated
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/input/synaptics_dsx.h>
+#include "synaptics_i2c_rmi4.h"
+
+#define CHAR_DEVICE_NAME "rmi"
+#define DEVICE_CLASS_NAME "rmidev"
+#define DEV_NUMBER 1
+#define REG_ADDR_LIMIT 0xFFFF
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_address_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_length_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+static ssize_t rmidev_sysfs_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+static ssize_t rmidev_sysfs_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count);
+
+struct rmidev_handle {
+ dev_t dev_no;
+ unsigned short address;
+ unsigned int length;
+ struct device dev;
+ struct synaptics_rmi4_data *rmi4_data;
+ struct synaptics_rmi4_exp_fn_ptr *fn_ptr;
+ struct kobject *sysfs_dir;
+ void *data;
+};
+
+struct rmidev_data {
+ int ref_count;
+ struct cdev main_dev;
+ struct class *device_class;
+ struct mutex file_mutex;
+ struct rmidev_handle *rmi_dev;
+};
+
+static struct device_attribute attrs[] = {
+ __ATTR(open, S_IWUGO,
+ synaptics_rmi4_show_error,
+ rmidev_sysfs_open_store),
+ __ATTR(release, S_IWUGO,
+ synaptics_rmi4_show_error,
+ rmidev_sysfs_release_store),
+ __ATTR(address, S_IWUGO,
+ synaptics_rmi4_show_error,
+ rmidev_sysfs_address_store),
+ __ATTR(length, S_IWUGO,
+ synaptics_rmi4_show_error,
+ rmidev_sysfs_length_store),
+ __ATTR(data, (S_IRUGO | S_IWUGO),
+ rmidev_sysfs_data_show,
+ rmidev_sysfs_data_store),
+};
+
+static int rmidev_major_num;
+
+static struct class *rmidev_device_class;
+
+static struct rmidev_handle *rmidev;
+
+static struct completion remove_complete;
+
+static ssize_t rmidev_sysfs_open_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input != 1)
+ return -EINVAL;
+
+ rmidev->fn_ptr->enable(rmidev->rmi4_data, false);
+ dev_dbg(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Attention interrupt disabled\n",
+ __func__);
+
+ return count;
+}
+
+static ssize_t rmidev_sysfs_release_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input != 1)
+ return -EINVAL;
+
+ rmidev->fn_ptr->enable(rmidev->rmi4_data, true);
+ dev_dbg(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Attention interrupt enabled\n",
+ __func__);
+
+ return count;
+}
+
+static ssize_t rmidev_sysfs_address_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input > REG_ADDR_LIMIT)
+ return -EINVAL;
+
+ rmidev->address = (unsigned short)input;
+
+ return count;
+}
+
+static ssize_t rmidev_sysfs_length_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int input;
+
+ if (sscanf(buf, "%u", &input) != 1)
+ return -EINVAL;
+
+ if (input > REG_ADDR_LIMIT)
+ return -EINVAL;
+
+ rmidev->length = input;
+
+ return count;
+}
+
+static ssize_t rmidev_sysfs_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval;
+ unsigned int data_length = rmidev->length;
+
+ if (data_length > (REG_ADDR_LIMIT - rmidev->address))
+ data_length = REG_ADDR_LIMIT - rmidev->address;
+
+ if (data_length) {
+ retval = rmidev->fn_ptr->read(rmidev->rmi4_data,
+ rmidev->address,
+ (unsigned char *)buf,
+ data_length);
+ if (retval < 0) {
+ dev_err(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Failed to read data\n",
+ __func__);
+ return retval;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return data_length;
+}
+
+static ssize_t rmidev_sysfs_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int retval;
+ unsigned int data_length = rmidev->length;
+
+ if (data_length > (REG_ADDR_LIMIT - rmidev->address))
+ data_length = REG_ADDR_LIMIT - rmidev->address;
+
+ if (data_length) {
+ retval = rmidev->fn_ptr->write(rmidev->rmi4_data,
+ rmidev->address,
+ (unsigned char *)buf,
+ data_length);
+ if (retval < 0) {
+ dev_err(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Failed to write data\n",
+ __func__);
+ return retval;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return data_length;
+}
+
+/*
+ * rmidev_llseek - set the register address used by subsequent reads and writes
+ *
+ * @filp: file structure for seek
+ * @off: offset
+ * if whence == SEEK_SET,
+ * high 16 bits: page address
+ * low 16 bits: register address
+ * if whence == SEEK_CUR,
+ * offset from current position
+ * if whence == SEEK_END,
+ * offset from end position (0xFFFF)
+ * @whence: SEEK_SET, SEEK_CUR, or SEEK_END
+ */
+static loff_t rmidev_llseek(struct file *filp, loff_t off, int whence)
+{
+ loff_t newpos;
+ struct rmidev_data *dev_data = filp->private_data;
+
+ if (IS_ERR(dev_data)) {
+ pr_err("%s: Pointer of char device data is invalid", __func__);
+ return -EBADF;
+ }
+
+ mutex_lock(&(dev_data->file_mutex));
+
+ switch (whence) {
+ case SEEK_SET:
+ newpos = off;
+ break;
+ case SEEK_CUR:
+ newpos = filp->f_pos + off;
+ break;
+ case SEEK_END:
+ newpos = REG_ADDR_LIMIT + off;
+ break;
+ default:
+ newpos = -EINVAL;
+ goto clean_up;
+ }
+
+ if (newpos < 0 || newpos > REG_ADDR_LIMIT) {
+ dev_err(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: New position 0x%04x is invalid\n",
+ __func__, (unsigned int)newpos);
+ newpos = -EINVAL;
+ goto clean_up;
+ }
+
+ filp->f_pos = newpos;
+
+clean_up:
+ mutex_unlock(&(dev_data->file_mutex));
+
+ return newpos;
+}
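Together with rmidev_read()/rmidev_write() below, this gives userspace direct register access: seek to a register address, then read or write. A hedged userspace sketch follows; the /dev/rmi/rmi0 path assumes the "rmi/" devnode prefix set up later in this file and minor number 0, and the 0x00E9 address is simply the first PDT entry as defined in the header:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[6];
	int fd = open("/dev/rmi/rmi0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Seek to register 0x00E9 (first PDT entry), then read 6 bytes. */
	if (lseek(fd, 0x00E9, SEEK_SET) < 0 ||
	    read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		close(fd);
		return 1;
	}

	printf("%02x %02x %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
	close(fd);
	return 0;
}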
+
+/*
+ * rmidev_read - used to read data from the RMI device
+ *
+ * @filp: file structure for read
+ * @buf: user space buffer pointer
+ * @count: number of bytes to read
+ * @f_pos: offset (starting register address)
+ */
+static ssize_t rmidev_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t retval;
+ unsigned char tmpbuf[count + 1];
+ struct rmidev_data *dev_data = filp->private_data;
+
+ if (IS_ERR(dev_data)) {
+ pr_err("%s: Pointer of char device data is invalid", __func__);
+ return -EBADF;
+ }
+
+ if (count == 0)
+ return 0;
+
+ if (count > (REG_ADDR_LIMIT - *f_pos))
+ count = REG_ADDR_LIMIT - *f_pos;
+
+ mutex_lock(&(dev_data->file_mutex));
+
+ retval = rmidev->fn_ptr->read(rmidev->rmi4_data,
+ *f_pos,
+ tmpbuf,
+ count);
+ if (retval < 0)
+ goto clean_up;
+
+ if (copy_to_user(buf, tmpbuf, count))
+ retval = -EFAULT;
+ else
+ *f_pos += retval;
+
+clean_up:
+ mutex_unlock(&(dev_data->file_mutex));
+
+ return retval;
+}
+
+/*
+ * rmidev_write - used to write data to the RMI device
+ *
+ * @filp: file structure for write
+ * @buf: user space buffer pointer
+ * @count: number of bytes to write
+ * @f_pos: offset (starting register address)
+ */
+static ssize_t rmidev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t retval;
+ unsigned char tmpbuf[count + 1];
+ struct rmidev_data *dev_data = filp->private_data;
+
+ if (IS_ERR(dev_data)) {
+ pr_err("%s: Pointer of char device data is invalid", __func__);
+ return -EBADF;
+ }
+
+ if (count == 0)
+ return 0;
+
+ if (count > (REG_ADDR_LIMIT - *f_pos))
+ count = REG_ADDR_LIMIT - *f_pos;
+
+ if (copy_from_user(tmpbuf, buf, count))
+ return -EFAULT;
+
+ mutex_lock(&(dev_data->file_mutex));
+
+ retval = rmidev->fn_ptr->write(rmidev->rmi4_data,
+ *f_pos,
+ tmpbuf,
+ count);
+ if (retval >= 0)
+ *f_pos += retval;
+
+ mutex_unlock(&(dev_data->file_mutex));
+
+ return retval;
+}
+
+/*
+ * rmidev_open - enable access to the RMI device
+ * @inp: inode structure
+ * @filp: file structure
+ */
+static int rmidev_open(struct inode *inp, struct file *filp)
+{
+ int retval = 0;
+ struct rmidev_data *dev_data =
+ container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+ if (!dev_data)
+ return -EACCES;
+
+ filp->private_data = dev_data;
+
+ mutex_lock(&(dev_data->file_mutex));
+
+ rmidev->fn_ptr->enable(rmidev->rmi4_data, false);
+ dev_dbg(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Attention interrupt disabled\n",
+ __func__);
+
+ if (dev_data->ref_count < 1)
+ dev_data->ref_count++;
+ else
+ retval = -EACCES;
+
+ mutex_unlock(&(dev_data->file_mutex));
+
+ return retval;
+}
+
+/*
+ * rmidev_release - release access to the RMI device
+ * @inp: inode structure
+ * @filp: file structure
+ */
+static int rmidev_release(struct inode *inp, struct file *filp)
+{
+ struct rmidev_data *dev_data =
+ container_of(inp->i_cdev, struct rmidev_data, main_dev);
+
+ if (!dev_data)
+ return -EACCES;
+
+ mutex_lock(&(dev_data->file_mutex));
+
+ dev_data->ref_count--;
+ if (dev_data->ref_count < 0)
+ dev_data->ref_count = 0;
+
+ rmidev->fn_ptr->enable(rmidev->rmi4_data, true);
+ dev_dbg(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: Attention interrupt enabled\n",
+ __func__);
+
+ mutex_unlock(&(dev_data->file_mutex));
+
+ return 0;
+}
+
+static const struct file_operations rmidev_fops = {
+ .owner = THIS_MODULE,
+ .llseek = rmidev_llseek,
+ .read = rmidev_read,
+ .write = rmidev_write,
+ .open = rmidev_open,
+ .release = rmidev_release,
+};
+
+static void rmidev_device_cleanup(struct rmidev_data *dev_data)
+{
+ dev_t devno;
+
+ if (dev_data) {
+ devno = dev_data->main_dev.dev;
+
+ if (dev_data->device_class)
+ device_destroy(dev_data->device_class, devno);
+
+ cdev_del(&dev_data->main_dev);
+
+ unregister_chrdev_region(devno, 1);
+
+ dev_dbg(&rmidev->rmi4_data->i2c_client->dev,
+ "%s: rmidev device removed\n",
+ __func__);
+ }
+
+ return;
+}
+
+static char *rmi_char_devnode(struct device *dev, mode_t *mode)
+{
+ if (!mode)
+ return NULL;
+
+ *mode = (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
+
+ return kasprintf(GFP_KERNEL, "rmi/%s", dev_name(dev));
+}
+
+static int rmidev_create_device_class(void)
+{
+ rmidev_device_class = class_create(THIS_MODULE, DEVICE_CLASS_NAME);
+
+ if (IS_ERR(rmidev_device_class)) {
+ pr_err("%s: Failed to create /dev/%s\n",
+ __func__, CHAR_DEVICE_NAME);
+ return -ENODEV;
+ }
+
+ rmidev_device_class->devnode = rmi_char_devnode;
+
+ return 0;
+}
+
+static int rmidev_init_device(struct synaptics_rmi4_data *rmi4_data)
+{
+ int retval;
+ dev_t dev_no;
+ unsigned char attr_count;
+ struct rmidev_data *dev_data;
+ struct device *device_ptr;
+
+ rmidev = kzalloc(sizeof(*rmidev), GFP_KERNEL);
+ if (!rmidev) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for rmidev\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_rmidev;
+ }
+
+ rmidev->fn_ptr = kzalloc(sizeof(*(rmidev->fn_ptr)), GFP_KERNEL);
+ if (!rmidev->fn_ptr) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for fn_ptr\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_fn_ptr;
+ }
+
+ rmidev->fn_ptr->read = rmi4_data->i2c_read;
+ rmidev->fn_ptr->write = rmi4_data->i2c_write;
+ rmidev->fn_ptr->enable = rmi4_data->irq_enable;
+ rmidev->rmi4_data = rmi4_data;
+
+ retval = rmidev_create_device_class();
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create device class\n",
+ __func__);
+ goto err_device_class;
+ }
+
+ if (rmidev_major_num) {
+ dev_no = MKDEV(rmidev_major_num, DEV_NUMBER);
+ retval = register_chrdev_region(dev_no, 1, CHAR_DEVICE_NAME);
+ } else {
+ retval = alloc_chrdev_region(&dev_no, 0, 1, CHAR_DEVICE_NAME);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to allocate char device region\n",
+ __func__);
+ goto err_device_region;
+ }
+
+ rmidev_major_num = MAJOR(dev_no);
+ dev_dbg(&rmi4_data->i2c_client->dev,
+ "%s: Major number of rmidev = %d\n",
+ __func__, rmidev_major_num);
+ }
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to alloc mem for dev_data\n",
+ __func__);
+ retval = -ENOMEM;
+ goto err_dev_data;
+ }
+
+ mutex_init(&dev_data->file_mutex);
+ dev_data->rmi_dev = rmidev;
+ rmidev->data = dev_data;
+
+ cdev_init(&dev_data->main_dev, &rmidev_fops);
+
+ retval = cdev_add(&dev_data->main_dev, dev_no, 1);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to add rmi char device\n",
+ __func__);
+ goto err_char_device;
+ }
+
+ dev_set_name(&rmidev->dev, "rmidev%d", MINOR(dev_no));
+ dev_data->device_class = rmidev_device_class;
+
+ device_ptr = device_create(dev_data->device_class, NULL, dev_no,
+ NULL, CHAR_DEVICE_NAME"%d", MINOR(dev_no));
+ if (IS_ERR(device_ptr)) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create rmi char device\n",
+ __func__);
+ retval = -ENODEV;
+ goto err_char_device;
+ }
+
+ retval = gpio_export(rmi4_data->board->irq_gpio, false);
+ if (retval < 0) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to export attention gpio\n",
+ __func__);
+ } else {
+ retval = gpio_export_link(&(rmi4_data->input_dev->dev),
+ "attn", rmi4_data->board->irq_gpio);
+ if (retval < 0) {
+ dev_err(&rmi4_data->input_dev->dev,
+ "%s Failed to create gpio symlink\n",
+ __func__);
+ } else {
+ dev_dbg(&rmi4_data->input_dev->dev,
+ "%s: Exported attention gpio %d\n",
+ __func__, rmi4_data->board->irq_gpio);
+ }
+ }
+
+ rmidev->sysfs_dir = kobject_create_and_add("rmidev",
+ &rmi4_data->input_dev->dev.kobj);
+ if (!rmidev->sysfs_dir) {
+ dev_err(&rmi4_data->i2c_client->dev,
+ "%s: Failed to create sysfs directory\n",
+ __func__);
+ goto err_sysfs_dir;
+ }
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
+ retval = sysfs_create_file(rmidev->sysfs_dir,
+ &attrs[attr_count].attr);
+ if (retval < 0) {
+ dev_err(&rmi4_data->input_dev->dev,
+ "%s: Failed to create sysfs attributes\n",
+ __func__);
+ retval = -ENODEV;
+ goto err_sysfs_attrs;
+ }
+ }
+
+ return 0;
+
+err_sysfs_attrs:
+ /* attr_count is unsigned, so count down without going below zero,
+ * and remove the attributes from the directory they were created in. */
+ while (attr_count > 0) {
+ attr_count--;
+ sysfs_remove_file(rmidev->sysfs_dir,
+ &attrs[attr_count].attr);
+ }
+
+ kobject_put(rmidev->sysfs_dir);
+
+err_sysfs_dir:
+err_char_device:
+ rmidev_device_cleanup(dev_data);
+ kfree(dev_data);
+
+err_dev_data:
+ unregister_chrdev_region(dev_no, 1);
+
+err_device_region:
+ class_destroy(rmidev_device_class);
+
+err_device_class:
+ kfree(rmidev->fn_ptr);
+
+err_fn_ptr:
+ kfree(rmidev);
+
+err_rmidev:
+ return retval;
+}
+
+static void rmidev_remove_device(struct synaptics_rmi4_data *rmi4_data)
+{
+ unsigned char attr_count;
+ struct rmidev_data *dev_data;
+
+ if (!rmidev)
+ return;
+
+ for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++)
+ sysfs_remove_file(rmidev->sysfs_dir, &attrs[attr_count].attr);
+
+ kobject_put(rmidev->sysfs_dir);
+
+ dev_data = rmidev->data;
+ if (dev_data) {
+ rmidev_device_cleanup(dev_data);
+ kfree(dev_data);
+ }
+
+ unregister_chrdev_region(rmidev->dev_no, 1);
+
+ class_destroy(rmidev_device_class);
+
+ kfree(rmidev->fn_ptr);
+ kfree(rmidev);
+
+ complete(&remove_complete);
+
+ return;
+}
+
+static int __init rmidev_module_init(void)
+{
+ synaptics_rmi4_new_function(RMI_DEV, true,
+ rmidev_init_device,
+ rmidev_remove_device,
+ NULL);
+ return 0;
+}
+
+static void __exit rmidev_module_exit(void)
+{
+ init_completion(&remove_complete);
+ synaptics_rmi4_new_function(RMI_DEV, false,
+ rmidev_init_device,
+ rmidev_remove_device,
+ NULL);
+ wait_for_completion(&remove_complete);
+ return;
+}
+
+module_init(rmidev_module_init);
+module_exit(rmidev_module_exit);
+
+MODULE_AUTHOR("Synaptics, Inc.");
+MODULE_DESCRIPTION("RMI4 RMI_Dev Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SYNAPTICS_RMI4_DRIVER_VERSION);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1e889a078dbc..32e2617d654f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -158,6 +158,7 @@ enum event_type_t {
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(bool, is_idle);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
@@ -176,7 +177,11 @@ static struct srcu_struct pmus_srcu;
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+int sysctl_perf_event_paranoid __read_mostly = -1;
+#else
int sysctl_perf_event_paranoid __read_mostly = 1;
+#endif
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -1657,7 +1662,32 @@ static int __perf_remove_from_context(void *info)
}
-/*
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+ int up_ret;
+ /*
+ * CPU was offline. Bring it online so we can
+ * gracefully exit a perf context.
+ */
+ up_ret = cpu_up(event->cpu);
+ if (!up_ret)
+ /* Try the remove call once again. */
+ cpu_function_call(event->cpu, __perf_remove_from_context,
+ rep);
+ else
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+}
+#endif
+
+ /*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
@@ -1670,7 +1700,8 @@ static int __perf_remove_from_context(void *info)
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void __ref perf_remove_from_context(struct perf_event *event,
+ bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
@@ -1678,6 +1709,7 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
.event = event,
.detach_group = detach_group,
};
+ int ret;
lockdep_assert_held(&ctx->mutex);
@@ -1688,7 +1720,11 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
* already called __perf_remove_from_context from
* perf_event_exit_cpu.
*/
- cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+ ret = cpu_function_call(event->cpu, __perf_remove_from_context,
+ &re);
+ if (ret == -ENXIO)
+ perf_retry_remove(event, &re);
+
return;
}
@@ -3353,9 +3389,12 @@ static int perf_event_read(struct perf_event *event, bool group)
.group = group,
.ret = 0,
};
- smp_call_function_single(event->oncpu,
- __perf_event_read, &data, 1);
- ret = data.ret;
+ if (!event->attr.exclude_idle ||
+ !per_cpu(is_idle, event->oncpu)) {
+ smp_call_function_single(event->oncpu,
+ __perf_event_read, &data, 1);
+ ret = data.ret;
+ }
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
@@ -3460,7 +3499,8 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
if (!task) {
/* Must be root to operate on a CPU event: */
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ if (event->owner != EVENT_OWNER_KERNEL && perf_paranoid_cpu() &&
+ !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
@@ -3844,6 +3884,15 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
*/
static int perf_release(struct inode *inode, struct file *file)
{
+ struct perf_event *event = file->private_data;
+
+ /*
+ * Event can be in state OFF because of a constraint check.
+ * Change to ACTIVE so that it gets cleaned up correctly.
+ */
+ if ((event->state == PERF_EVENT_STATE_OFF) &&
+ event->attr.constraint_duplicate)
+ event->state = PERF_EVENT_STATE_ACTIVE;
put_event(file->private_data);
return 0;
}
@@ -6920,6 +6969,8 @@ static struct pmu perf_swevent = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
@@ -7041,6 +7092,8 @@ static struct pmu perf_tracepoint = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
@@ -7319,6 +7372,8 @@ static struct pmu perf_cpu_clock = {
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+
+ .events_across_hotplug = 1,
};
/*
@@ -7400,6 +7455,8 @@ static struct pmu perf_task_clock = {
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
@@ -8272,6 +8329,9 @@ SYSCALL_DEFINE5(perf_event_open,
if (err)
return err;
+ if (attr.constraint_duplicate || attr.__reserved_1)
+ return -EINVAL;
+
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -9302,6 +9362,18 @@ static void __perf_event_exit_context(void *__info)
rcu_read_unlock();
}
+static void __perf_event_stop_swclock(void *__info)
+{
+ struct perf_event_context *ctx = __info;
+ struct perf_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
+ if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_stop(event, 0);
+ }
+}
+
static void perf_event_exit_cpu_context(int cpu)
{
struct perf_event_context *ctx;
@@ -9311,20 +9383,56 @@ static void perf_event_exit_cpu_context(int cpu)
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
-
mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+ /*
+ * If keeping events across hotplugging is supported, do not
+ * remove the event list, but keep it alive across CPU hotplug.
+ * The context is exited via an fd close path when userspace
+ * is done and the target CPU is online. If software clock
+ * event is active, then stop hrtimer associated with it.
+ * Start the timer when the CPU comes back online.
+ */
+ if (!pmu->events_across_hotplug)
+ smp_call_function_single(cpu, __perf_event_exit_context,
+ ctx, 1);
+ else
+ smp_call_function_single(cpu, __perf_event_stop_swclock,
+ ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
+static void perf_event_start_swclock(int cpu)
+{
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int idx;
+ struct perf_event *event, *tmp;
+
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ if (pmu->events_across_hotplug) {
+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+ list_for_each_entry_safe(event, tmp, &ctx->event_list,
+ event_entry) {
+ if (event->attr.config ==
+ PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_start(event, 0);
+ }
+ }
+ }
+ srcu_read_unlock(&pmus_srcu, idx);
+}
+
static void perf_event_exit_cpu(int cpu)
{
perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
+static inline void perf_event_start_swclock(int cpu) { }
#endif
static int
@@ -9363,6 +9471,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
+
+ case CPU_STARTING:
+ perf_event_start_swclock(cpu);
+ break;
+
default:
break;
}
@@ -9370,6 +9483,25 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case IDLE_START:
+ __this_cpu_write(is_idle, true);
+ break;
+ case IDLE_END:
+ __this_cpu_write(is_idle, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+ .notifier_call = event_idle_notif,
+};
+
void __init perf_event_init(void)
{
int ret;
@@ -9383,6 +9515,7 @@ void __init perf_event_init(void)
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
+ idle_notifier_register(&perf_event_idle_nb);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..7da5b674d16e 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -614,6 +614,8 @@ static struct pmu perf_breakpoint = {
.start = hw_breakpoint_start,
.stop = hw_breakpoint_stop,
.read = hw_breakpoint_pmu_read,
+
+ .events_across_hotplug = 1,
};
int __init init_hw_breakpoint(void)
diff --git a/kernel/exit.c b/kernel/exit.c
index 07110c6020a0..a32e83d567b9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -388,6 +388,7 @@ static void exit_mm(struct task_struct *tsk)
{
struct mm_struct *mm = tsk->mm;
struct core_state *core_state;
+ int mm_released;
mm_release(tsk, mm);
if (!mm)
@@ -434,9 +435,12 @@ static void exit_mm(struct task_struct *tsk)
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
- mmput(mm);
+
+ mm_released = mmput(mm);
if (test_thread_flag(TIF_MEMDIE))
exit_oom_victim();
+ if (mm_released)
+ set_tsk_thread_flag(tsk, TIF_MM_RELEASED);
}
static struct task_struct *find_alive_thread(struct task_struct *p)
@@ -632,6 +636,7 @@ static void check_stack_usage(void)
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long free;
+ int islower = false;
free = stack_not_used(current);
@@ -640,11 +645,16 @@ static void check_stack_usage(void)
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
- pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
- current->comm, task_pid_nr(current), free);
lowest_to_date = free;
+ islower = true;
}
spin_unlock(&low_water_lock);
+
+ if (islower) {
+ printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+ "%lu bytes left\n",
+ current->comm, task_pid_nr(current), free);
+ }
}
#else
static inline void check_stack_usage(void) {}
@@ -699,6 +709,9 @@ void do_exit(long code)
}
exit_signals(tsk); /* sets PF_EXITING */
+
+ sched_exit(tsk);
+
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ec6e9939b2c..c9eb86b646ab 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
+#include <linux/kasan.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
@@ -169,6 +170,7 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
static inline void free_thread_info(struct thread_info *ti)
{
+ kasan_alloc_pages(virt_to_page(ti), THREAD_SIZE_ORDER);
free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
@@ -692,8 +694,9 @@ EXPORT_SYMBOL_GPL(__mmdrop);
/*
* Decrement the use count and release all resources for an mm.
*/
-void mmput(struct mm_struct *mm)
+int mmput(struct mm_struct *mm)
{
+ int mm_freed = 0;
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
@@ -711,7 +714,9 @@ void mmput(struct mm_struct *mm)
if (mm->binfmt)
module_put(mm->binfmt->module);
mmdrop(mm);
+ mm_freed = 1;
}
+ return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
diff --git a/kernel/include/linux/input/synaptics_dsx.h b/kernel/include/linux/input/synaptics_dsx.h
new file mode 100644
index 000000000000..b779e42a9bac
--- /dev/null
+++ b/kernel/include/linux/input/synaptics_dsx.h
@@ -0,0 +1,59 @@
+/*
+ * Synaptics RMI4 touchscreen driver
+ *
+ * Copyright (C) 2012 Synaptics Incorporated
+ *
+ * Copyright (C) 2012 Alexandra Chin <alexandra.chin@tw.synaptics.com>
+ * Copyright (C) 2012 Scott Lin <scott.lin@tw.synaptics.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SYNAPTICS_DSX_H_
+#define _SYNAPTICS_DSX_H_
+
+/*
+ * struct synaptics_rmi4_capacitance_button_map - 0d button map
+ * @nbuttons: number of buttons
+ * @map: button map
+ */
+struct synaptics_rmi4_capacitance_button_map {
+ unsigned char nbuttons;
+ unsigned char *map;
+};
+
+/*
+ * struct synaptics_rmi4_platform_data - rmi4 platform data
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @regulator_en: regulator enable flag
+ * @irq_gpio: attention interrupt gpio
+ * @irq_flags: flags used by the irq
+ * @reset_gpio: reset gpio
+ * @panel_x: maximum panel value along the x axis
+ * @panel_y: maximum panel value along the y axis
+ * @gpio_config: pointer to gpio configuration function
+ * @capacitance_button_map: pointer to 0d button map
+ */
+struct synaptics_rmi4_platform_data {
+ bool x_flip;
+ bool y_flip;
+ bool regulator_en;
+ unsigned irq_gpio;
+ unsigned long irq_flags;
+ unsigned reset_gpio;
+ unsigned panel_x;
+ unsigned panel_y;
+ int (*gpio_config)(unsigned gpio, bool configure);
+ struct synaptics_rmi4_capacitance_button_map *capacitance_button_map;
+};
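A hedged sketch of how a board file might populate this structure; all names, GPIO numbers, resolutions, and key codes below are hypothetical, and KEY_*, IRQF_*, and ARRAY_SIZE come from the usual input/interrupt/kernel headers. A board would typically hand the structure to the driver through the .platform_data field of its i2c_board_info entry.

static unsigned char example_button_codes[] = {KEY_MENU, KEY_HOME, KEY_BACK};

static struct synaptics_rmi4_capacitance_button_map example_button_map = {
	.nbuttons = ARRAY_SIZE(example_button_codes),
	.map = example_button_codes,
};

static struct synaptics_rmi4_platform_data example_ts_pdata = {
	.regulator_en = true,
	.irq_gpio = 39,				/* hypothetical ATTN gpio */
	.irq_flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
	.reset_gpio = 40,			/* hypothetical reset gpio */
	.panel_x = 1080,
	.panel_y = 1920,
	.capacitance_button_map = &example_button_map,
};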
+
+#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6ead200370da..5cb153a8474a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -319,6 +319,9 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
desc->affinity_notify = notify;
raw_spin_unlock_irqrestore(&desc->lock, flags);
+ if (!notify && old_notify)
+ cancel_work_sync(&old_notify->work);
+
if (old_notify)
kref_put(&old_notify->kref, old_notify->release);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0551c219c40e..fb42418507ae 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
+#include <linux/delay.h>
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* values at the cost of a few extra spins.
*/
cpu_relax_lowlatency();
+
+ /*
+ * On ARM systems, we must slow down the waiter's repeated
+ * acquisition of spin_mlock and atomics on the lock count, or
+ * we risk starving out a thread attempting to release the
+ * mutex. The mutex slowpath release must take spin lock
+ * wait_lock. This spin lock can share a monitor with the
+ * other waiter atomics in the mutex data structure, so must
+ * take care to rate limit the waiters.
+ */
+ udelay(1);
}
osq_unlock(&lock->osq);
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a596cffa..d381f559e0ce 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -12,6 +12,8 @@
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/bug.h>
+#include <soc/qcom/watchdog.h>
void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
@@ -64,6 +66,11 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
owner ? owner->comm : "<none>",
owner ? task_pid_nr(owner) : -1,
lock->owner_cpu);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+ msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+ BUG();
+#endif
dump_stack();
}
@@ -114,7 +121,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
__delay(1);
}
/* lockup suspected: */
- spin_dump(lock, "lockup suspected");
+ spin_bug(lock, "lockup suspected");
#ifdef CONFIG_SMP
trigger_all_cpu_backtrace();
#endif
@@ -167,6 +174,11 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
msg, raw_smp_processor_id(), current->comm,
task_pid_nr(current), lock);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+ msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+ BUG();
+#endif
dump_stack();
}
diff --git a/kernel/module.c b/kernel/module.c
index 0e5c71195f18..fe5248ab3378 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2499,7 +2499,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
/* We'll tack temporary mod_kallsyms on the end. */
mod->init_size = ALIGN(mod->init_size,
- __alignof__(struct mod_kallsyms));
+ __alignof__(struct mod_kallsyms));
info->mod_kallsyms_init_off = mod->init_size;
mod->init_size += sizeof(struct mod_kallsyms);
mod->init_size = debug_align(mod->init_size);
@@ -2578,7 +2578,13 @@ void * __weak module_alloc(unsigned long size)
return vmalloc_exec(size);
}
-#ifdef CONFIG_DEBUG_KMEMLEAK
+#if defined(CONFIG_DEBUG_KMEMLEAK) && defined(CONFIG_DEBUG_MODULE_SCAN_OFF)
+static void kmemleak_load_module(const struct module *mod,
+ const struct load_info *info)
+{
+ kmemleak_no_scan(mod->module_core);
+}
+#elif defined(CONFIG_DEBUG_KMEMLEAK)
static void kmemleak_load_module(const struct module *mod,
const struct load_info *info)
{
diff --git a/kernel/panic.c b/kernel/panic.c
index 223564d3e1f8..b4a0edc489c5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -25,6 +25,9 @@
#include <linux/nmi.h>
#include <linux/console.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/exception.h>
+
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
@@ -80,6 +83,8 @@ void panic(const char *fmt, ...)
long i, i_next = 0;
int state = 0;
+ trace_kernel_panic(0);
+
/*
* Disable local interrupts. This will prevent panic_smp_self_stop
* from deadlocking the first cpu that invokes the panic, since
@@ -181,6 +186,9 @@ void panic(const char *fmt, ...)
mdelay(PANIC_TIMER_STEP);
}
}
+
+ trace_kernel_panic_late(0);
+
if (panic_timeout != 0) {
/*
* This will not be a clean reboot, with everything
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..8ecc7b3f7dd9 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -43,6 +43,8 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/uaccess.h>
#include <linux/export.h>
@@ -67,6 +69,8 @@ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -81,6 +85,8 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -91,11 +97,12 @@ static struct pm_qos_object network_lat_pm_qos = {
.name = "network_latency",
};
-
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
@@ -259,22 +266,60 @@ static const struct file_operations pm_qos_debug_fops = {
.release = single_release,
};
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+ struct cpumask *cpus)
+{
+ struct pm_qos_request *req = NULL;
+ int cpu;
+ s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
+
+ plist_for_each_entry(req, &c->list, node) {
+ for_each_cpu(cpu, &req->cpus_affine) {
+ switch (c->type) {
+ case PM_QOS_MIN:
+ if (qos_val[cpu] > req->node.prio)
+ qos_val[cpu] = req->node.prio;
+ break;
+ case PM_QOS_MAX:
+ if (req->node.prio > qos_val[cpu])
+ qos_val[cpu] = req->node.prio;
+ break;
+ case PM_QOS_SUM:
+ qos_val[cpu] += req->node.prio;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (c->target_per_cpu[cpu] != qos_val[cpu])
+ cpumask_set_cpu(cpu, cpus);
+ c->target_per_cpu[cpu] = qos_val[cpu];
+ }
+}
+
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
- * @node: request to add to the list, to update or to remove
+ * @req: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
- enum pm_qos_req_action action, int value)
+int pm_qos_update_target(struct pm_qos_constraints *c,
+ struct pm_qos_request *req,
+ enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
+ struct plist_node *node = &req->node;
+ struct cpumask cpus;
int ret;
spin_lock_irqsave(&pm_qos_lock, flags);
@@ -305,7 +350,9 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
}
curr_value = pm_qos_get_value(c);
+ cpumask_clear(&cpus);
pm_qos_set_value(c, curr_value);
+ pm_qos_set_value_for_cpus(c, &cpus);
spin_unlock_irqrestore(&pm_qos_lock, flags);
@@ -315,7 +362,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
- NULL);
+ &cpus);
} else {
ret = 0;
}
@@ -398,12 +445,50 @@ int pm_qos_request(int pm_qos_class)
}
EXPORT_SYMBOL_GPL(pm_qos_request);
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
+{
+ return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpu);
+
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
+{
+ unsigned long irqflags;
+ int cpu;
+ struct pm_qos_constraints *c = NULL;
+ int val;
+
+ spin_lock_irqsave(&pm_qos_lock, irqflags);
+ c = pm_qos_array[pm_qos_class]->constraints;
+ val = c->default_value;
+
+ for_each_cpu(cpu, mask) {
+ switch (c->type) {
+ case PM_QOS_MIN:
+ if (c->target_per_cpu[cpu] < val)
+ val = c->target_per_cpu[cpu];
+ break;
+ case PM_QOS_MAX:
+ if (c->target_per_cpu[cpu] > val)
+ val = c->target_per_cpu[cpu];
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+ return val;
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpumask);
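A hedged sketch of how a per-CPU consumer (for example, a cpuidle governor) might use the helpers added above: pm_qos_request_for_cpu() returns the cached per-CPU target, and pm_qos_request_for_cpumask() aggregates across a mask under pm_qos_lock. PM_QOS_CPU_DMA_LATENCY is the existing class; the 500 us threshold is purely illustrative.

static bool example_deep_idle_ok(int cpu)
{
	const s32 state_exit_latency_us = 500;	/* illustrative threshold */
	s32 latency_req = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, cpu);

	/* Enter the deep state only if no request on this CPU forbids it. */
	return latency_req >= state_exit_latency_us;
}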
+
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
@@ -412,7 +497,7 @@ static void __pm_qos_update_request(struct pm_qos_request *req,
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ req, PM_QOS_UPDATE_REQ, new_value);
}
/**
@@ -430,6 +515,41 @@ static void pm_qos_work_fn(struct work_struct *work)
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
+#ifdef CONFIG_SMP
+static void pm_qos_irq_release(struct kref *ref)
+{
+ unsigned long flags;
+ struct irq_affinity_notify *notify = container_of(ref,
+ struct irq_affinity_notify, kref);
+ struct pm_qos_request *req = container_of(notify,
+ struct pm_qos_request, irq_notify);
+ struct pm_qos_constraints *c =
+ pm_qos_array[req->pm_qos_class]->constraints;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ cpumask_setall(&req->cpus_affine);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
+}
+
+static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ unsigned long flags;
+ struct pm_qos_request *req = container_of(notify,
+ struct pm_qos_request, irq_notify);
+ struct pm_qos_constraints *c =
+ pm_qos_array[req->pm_qos_class]->constraints;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ cpumask_copy(&req->cpus_affine, mask);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+}
+#endif
+
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
@@ -453,11 +573,56 @@ void pm_qos_add_request(struct pm_qos_request *req,
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
+
+ switch (req->type) {
+ case PM_QOS_REQ_AFFINE_CORES:
+ if (cpumask_empty(&req->cpus_affine)) {
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
+ }
+ break;
+#ifdef CONFIG_SMP
+ case PM_QOS_REQ_AFFINE_IRQ:
+ if (irq_can_set_affinity(req->irq)) {
+ int ret = 0;
+ struct irq_desc *desc = irq_to_desc(req->irq);
+ struct cpumask *mask = desc->irq_data.common->affinity;
+
+ /* Get the current affinity */
+ cpumask_copy(&req->cpus_affine, mask);
+ req->irq_notify.irq = req->irq;
+ req->irq_notify.notify = pm_qos_irq_notify;
+ req->irq_notify.release = pm_qos_irq_release;
+
+ ret = irq_set_affinity_notifier(req->irq,
+ &req->irq_notify);
+ if (ret) {
+ WARN(1, KERN_ERR "IRQ affinity notify set failed\n");
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ }
+ } else {
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
+ req->irq);
+ }
+ break;
+#endif
+ default:
+ WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
+ /* fall through */
+ case PM_QOS_REQ_ALL_CORES:
+ cpumask_setall(&req->cpus_affine);
+ break;
+ }
+
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
- &req->node, PM_QOS_ADD_REQ, value);
+ req, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
@@ -511,7 +676,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ req, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
@@ -531,15 +696,25 @@ void pm_qos_remove_request(struct pm_qos_request *req)
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+ WARN(1, "pm_qos_remove_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
+#ifdef CONFIG_SMP
+ if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+ int ret = 0;
+ /* Remove the IRQ affinity notifier */
+ ret = irq_set_affinity_notifier(req->irq, NULL);
+ if (ret)
+ WARN(1, "IRQ affinity notify set failed\n");
+ }
+#endif
+
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_REMOVE_REQ,
+ req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
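
For context, a minimal sketch of how a driver might use the per-CPU aware request
types introduced above. It assumes the req->type/req->irq fields and the
PM_QOS_REQ_AFFINE_IRQ value are declared in include/linux/pm_qos.h by the same
series (not shown in this hunk); the IRQ number and latency value are hypothetical.

	#include <linux/pm_qos.h>

	static struct pm_qos_request example_req;

	static void example_add_irq_affine_request(int irq)
	{
		/* Constrain CPU DMA latency only on the CPUs servicing this IRQ */
		example_req.type = PM_QOS_REQ_AFFINE_IRQ;
		example_req.irq = irq;
		pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 10);
	}

	static int example_read_cpu_target(int cpu)
	{
		/* Read the per-CPU aggregated target exported by this patch */
		return pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, cpu);
	}
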
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e7e586bb2022..7b884dc55bd0 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -236,7 +236,11 @@ struct printk_log {
u8 facility; /* syslog facility */
u8 flags:5; /* internal record flags */
u8 level:3; /* syslog level */
-};
+}
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+__packed __aligned(4)
+#endif
+;
/*
* The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
@@ -277,11 +281,7 @@ static u32 clear_idx;
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)
/* record buffer */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-#define LOG_ALIGN 4
-#else
#define LOG_ALIGN __alignof__(struct printk_log)
-#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
@@ -2138,8 +2138,12 @@ static int console_cpu_notify(struct notifier_block *self,
case CPU_DEAD:
case CPU_DOWN_FAILED:
case CPU_UP_CANCELED:
+ case CPU_DYING:
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
console_lock();
console_unlock();
+#endif
+ break;
}
return NOTIFY_OK;
}
diff --git a/kernel/resource.c b/kernel/resource.c
index 249b1eb1e6e1..4c9835c09dcd 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -163,7 +163,7 @@ static const struct file_operations proc_iomem_operations = {
static int __init ioresources_init(void)
{
proc_create("ioports", 0, NULL, &proc_ioports_operations);
- proc_create("iomem", 0, NULL, &proc_iomem_operations);
+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
return 0;
}
__initcall(ioresources_init);
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 67687973ce80..1f159743ebfc 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -13,8 +13,9 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
+obj-y += wait.o completion.o idle.o sched_avg.o
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SCHED_HMP) += hmp.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index caf4041f5b0a..bc54e84675da 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -354,7 +354,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
return;
sched_clock_tick();
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sched();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f11fdb5c8084..7474463b9835 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -90,6 +90,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
+ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
+
DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -833,6 +835,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & ENQUEUE_RESTORE))
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+ trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
}
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -841,6 +844,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & DEQUEUE_SAVE))
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
+ trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -856,6 +860,9 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
if (task_contributes_to_load(p))
rq->nr_uninterruptible++;
+ if (flags & DEQUEUE_SLEEP)
+ clear_ed_task(p, rq);
+
dequeue_task(rq, p, flags);
}
@@ -1071,17 +1078,19 @@ static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new
{
lockdep_assert_held(&rq->lock);
- dequeue_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ dequeue_task(rq, p, 0);
+ double_lock_balance(rq, cpu_rq(new_cpu));
set_task_cpu(p, new_cpu);
+ double_unlock_balance(rq, cpu_rq(new_cpu));
raw_spin_unlock(&rq->lock);
rq = cpu_rq(new_cpu);
raw_spin_lock(&rq->lock);
BUG_ON(task_cpu(p) != new_cpu);
- p->on_rq = TASK_ON_RQ_QUEUED;
enqueue_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
return rq;
@@ -1103,6 +1112,8 @@ struct migration_arg {
*/
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
+ int src_cpu;
+
if (unlikely(!cpu_active(dest_cpu)))
return rq;
@@ -1110,6 +1121,7 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_
if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
return rq;
+ src_cpu = cpu_of(rq);
rq = move_queued_task(rq, p, dest_cpu);
return rq;
@@ -1125,6 +1137,8 @@ static int migration_cpu_stop(void *data)
struct migration_arg *arg = data;
struct task_struct *p = arg->task;
struct rq *rq = this_rq();
+ int src_cpu = cpu_of(rq);
+ bool moved = false;
/*
* The original target cpu might have gone down and we might
@@ -1145,12 +1159,18 @@ static int migration_cpu_stop(void *data)
* holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
* we're holding p->pi_lock.
*/
- if (task_rq(p) == rq && task_on_rq_queued(p))
+ if (task_rq(p) == rq && task_on_rq_queued(p)) {
rq = __migrate_task(rq, p, arg->dest_cpu);
+ moved = true;
+ }
raw_spin_unlock(&rq->lock);
raw_spin_unlock(&p->pi_lock);
local_irq_enable();
+
+ if (moved)
+ notify_migration(src_cpu, arg->dest_cpu, false, p);
+
return 0;
}
@@ -1224,7 +1244,8 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_equal(&p->cpus_allowed, new_mask))
goto out;
- if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
}
@@ -1235,7 +1256,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_test_cpu(task_cpu(p), new_mask))
goto out;
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
@@ -1274,6 +1294,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
!p->on_rq);
+ /*
+ * A migrating fair-class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
+ * because schedstat_wait_{start,end} rebase the migrating task's wait_start
+ * time based on p->on_rq.
+ */
+ WARN_ON_ONCE(p->state == TASK_RUNNING &&
+ p->sched_class == &fair_sched_class &&
+ (p->on_rq && !task_on_rq_migrating(p)));
+
#ifdef CONFIG_LOCKDEP
/*
* The caller should hold either p->pi_lock or rq->lock, when changing
@@ -1290,13 +1319,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
#endif
#endif
- trace_sched_migrate_task(p, new_cpu);
+ trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p);
p->se.nr_migrations++;
perf_event_task_migrate(p);
+
+ fixup_busy_time(p, new_cpu);
}
__set_task_cpu(p, new_cpu);
@@ -1310,9 +1341,11 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
src_rq = task_rq(p);
dst_rq = cpu_rq(cpu);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, cpu);
activate_task(dst_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(dst_rq, p, 0);
} else {
/*
@@ -1498,7 +1531,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* yield - it could be a while.
*/
if (unlikely(queued)) {
- ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+ ktime_t to = ktime_set(0, NSEC_PER_MSEC);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&to, HRTIMER_MODE_REL);
@@ -1717,6 +1750,7 @@ static void
ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
check_preempt_curr(rq, p, wake_flags);
+
p->state = TASK_RUNNING;
trace_sched_wakeup(p);
@@ -1808,6 +1842,8 @@ void sched_ttwu_pending(void)
void scheduler_ipi(void)
{
+ int cpu = smp_processor_id();
+
/*
* Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
* TIF_NEED_RESCHED remotely (for the first time) will also send
@@ -1815,9 +1851,18 @@ void scheduler_ipi(void)
*/
preempt_fold_need_resched();
- if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+ if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() &&
+ !got_boost_kick())
return;
+ if (got_boost_kick()) {
+ struct rq *rq = cpu_rq(cpu);
+
+ if (rq->curr->sched_class == &fair_sched_class)
+ check_for_migration(rq, rq->curr);
+ clear_boost_kick(cpu);
+ }
+
/*
* Not all reschedule IPI handlers call irq_enter/irq_exit, since
* traditionally all their work was done from the interrupt return
@@ -1924,7 +1969,17 @@ static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
unsigned long flags;
- int cpu, success = 0;
+ int cpu, src_cpu, success = 0;
+#ifdef CONFIG_SMP
+ unsigned int old_load;
+ struct rq *rq;
+ u64 wallclock;
+ struct related_thread_group *grp = NULL;
+#endif
+ bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
+ bool check_group = false;
+
+ wake_flags &= ~WF_NO_NOTIFIER;
/*
* If we are going to wake up a thread waiting for CONDITION we
@@ -1934,13 +1989,14 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ src_cpu = cpu = task_cpu(p);
+
if (!(p->state & state))
goto out;
trace_sched_waking(p);
success = 1; /* we're going to change ->state */
- cpu = task_cpu(p);
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
@@ -1982,6 +2038,22 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_rmb();
+ rq = cpu_rq(task_cpu(p));
+
+ raw_spin_lock(&rq->lock);
+ old_load = task_load(p);
+ wallclock = sched_ktime_clock();
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ raw_spin_unlock(&rq->lock);
+
+ rcu_read_lock();
+ grp = task_related_thread_group(p);
+ if (update_preferred_cluster(grp, p, old_load))
+ set_preferred_cluster(grp);
+ rcu_read_unlock();
+ check_group = grp != NULL;
+
p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;
@@ -1989,18 +2061,33 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
p->sched_class->task_waking(p);
cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
- if (task_cpu(p) != cpu) {
+
+ /* Refresh src_cpu as it could have changed since we last read it */
+ src_cpu = task_cpu(p);
+ if (src_cpu != cpu) {
wake_flags |= WF_MIGRATED;
set_task_cpu(p, cpu);
}
-#endif /* CONFIG_SMP */
+ set_task_last_wake(p, wallclock);
+#endif /* CONFIG_SMP */
ttwu_queue(p, cpu);
stat:
ttwu_stat(p, cpu, wake_flags);
out:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ if (freq_notif_allowed) {
+ if (!same_freq_domain(src_cpu, cpu)) {
+ check_for_freq_change(cpu_rq(cpu),
+ false, check_group);
+ check_for_freq_change(cpu_rq(src_cpu),
+ false, check_group);
+ } else if (success) {
+ check_for_freq_change(cpu_rq(cpu), true, false);
+ }
+ }
+
return success;
}
@@ -2016,9 +2103,13 @@ static void try_to_wake_up_local(struct task_struct *p)
{
struct rq *rq = task_rq(p);
- if (WARN_ON_ONCE(rq != this_rq()) ||
- WARN_ON_ONCE(p == current))
+ if (rq != this_rq() || p == current) {
+ printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p,"
+ " this_rq = %p, p = %p, current = %p\n",
+ __func__, task_pid_nr(p), p->comm, rq,
+ this_rq(), p, current);
return;
+ }
lockdep_assert_held(&rq->lock);
@@ -2041,13 +2132,20 @@ static void try_to_wake_up_local(struct task_struct *p)
trace_sched_waking(p);
- if (!task_on_rq_queued(p))
+ if (!task_on_rq_queued(p)) {
+ u64 wallclock = sched_ktime_clock();
+
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+ set_task_last_wake(p, wallclock);
+ }
ttwu_do_wakeup(rq, p, 0);
ttwu_stat(p, smp_processor_id(), 0);
out:
raw_spin_unlock(&p->pi_lock);
+ /* TODO: send cpufreq notifier */
}
/**
@@ -2068,6 +2166,26 @@ int wake_up_process(struct task_struct *p)
}
EXPORT_SYMBOL(wake_up_process);
+/**
+ * wake_up_process_no_notif - Wake up a specific process without notifying
+ * governor
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+int wake_up_process_no_notif(struct task_struct *p)
+{
+ WARN_ON(task_is_stopped_or_traced(p));
+ return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
+}
+EXPORT_SYMBOL(wake_up_process_no_notif);
+
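
A brief illustrative sketch of a caller for the helper above (the worker kthread
pointer is hypothetical; only wake_up_process_no_notif() comes from this patch):

	static struct task_struct *example_worker;	/* hypothetical kthread */

	static void example_kick_worker(void)
	{
		/* Wake the worker without notifying the cpufreq governor */
		if (example_worker)
			wake_up_process_no_notif(example_worker);
	}
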
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
@@ -2091,6 +2209,44 @@ void __dl_clear_params(struct task_struct *p)
dl_se->dl_yielded = 0;
}
+#ifdef CONFIG_SCHED_HMP
+/*
+ * sched_exit() - Set EXITING_TASK_MARKER in the task's ravg.sum_history
+ *
+ * Stop accounting the (exiting) task's future cpu usage.
+ *
+ * We need this so that reset_all_window_stats() can function correctly.
+ * reset_all_window_stats() depends on the do_each_thread/for_each_thread task
+ * iterators to reset *all* tasks' statistics. Exiting tasks, however, become
+ * invisible to those iterators. sched_exit() is therefore called on an exiting
+ * task before it is removed from the task list, which lets
+ * reset_all_window_stats() function correctly.
+ */
+void sched_exit(struct task_struct *p)
+{
+ unsigned long flags;
+ int cpu = get_cpu();
+ struct rq *rq = cpu_rq(cpu);
+ u64 wallclock;
+
+ sched_set_group_id(p, 0);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ /* rq->curr == p */
+ wallclock = sched_ktime_clock();
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ dequeue_task(rq, p, 0);
+ reset_task_stats(p);
+ p->ravg.mark_start = wallclock;
+ p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+ enqueue_task(rq, p, 0);
+ clear_ed_task(p, rq);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ put_cpu();
+}
+#endif /* CONFIG_SCHED_HMP */
+
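
Illustrative only: the real hook for sched_exit() lives in the exit path
(kernel/exit.c, not shown in this hunk). A hedged sketch of where such a call
would sit:

	static void example_exit_notify(struct task_struct *tsk)
	{
		/* Stop HMP window accounting before tsk leaves the task list */
		sched_exit(tsk);
		/* ... remainder of exit processing ... */
	}
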
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -2107,6 +2263,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+
INIT_LIST_HEAD(&p->se.group_node);
#ifdef CONFIG_SCHEDSTATS
@@ -2375,6 +2532,8 @@ void wake_up_new_task(struct task_struct *p)
struct rq *rq;
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ init_new_task_load(p);
+ add_new_task_to_grp(p);
/* Initialize new task's runnable average */
init_entity_runnable_average(&p->se);
#ifdef CONFIG_SMP
@@ -2387,6 +2546,7 @@ void wake_up_new_task(struct task_struct *p)
#endif
rq = __task_rq_lock(p);
+ mark_task_starting(p);
activate_task(rq, p, 0);
p->on_rq = TASK_ON_RQ_QUEUED;
trace_sched_wakeup_new(p);
@@ -2775,7 +2935,7 @@ void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
*load = rq->load.weight;
}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
/*
* sched_exec - execve() is a valuable balancing opportunity, because at
@@ -2785,9 +2945,13 @@ void sched_exec(void)
{
struct task_struct *p = current;
unsigned long flags;
- int dest_cpu;
+ int dest_cpu, curr_cpu;
+
+ if (sched_enable_hmp)
+ return;
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ curr_cpu = task_cpu(p);
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -2796,7 +2960,7 @@ void sched_exec(void)
struct migration_arg arg = { p, dest_cpu };
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
return;
}
unlock:
@@ -2863,16 +3027,29 @@ void scheduler_tick(void)
int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr;
+ u64 wallclock;
+ bool early_notif;
+ u32 old_load;
+ struct related_thread_group *grp;
sched_clock_tick();
raw_spin_lock(&rq->lock);
+ old_load = task_load(curr);
+ set_window_start(rq);
update_rq_clock(rq);
curr->sched_class->task_tick(rq, curr, 0);
update_cpu_load_active(rq);
calc_global_load_tick(rq);
+ wallclock = sched_ktime_clock();
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ early_notif = early_detection_notify(rq, wallclock);
raw_spin_unlock(&rq->lock);
+ if (early_notif)
+ atomic_notifier_call_chain(&load_alert_notifier_head,
+ 0, (void *)(long)cpu);
+
perf_event_task_tick();
#ifdef CONFIG_SMP
@@ -2880,6 +3057,15 @@ void scheduler_tick(void)
trigger_load_balance(rq);
#endif
rq_last_tick_reset(rq);
+
+ rcu_read_lock();
+ grp = task_related_thread_group(curr);
+ if (update_preferred_cluster(grp, curr, old_load))
+ set_preferred_cluster(grp);
+ rcu_read_unlock();
+
+ if (curr->sched_class == &fair_sched_class)
+ check_for_migration(rq, curr);
}
#ifdef CONFIG_NO_HZ_FULL
@@ -2998,6 +3184,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
pr_cont("\n");
}
#endif
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+ BUG();
+#endif
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
@@ -3106,6 +3295,7 @@ static void __sched notrace __schedule(bool preempt)
unsigned long *switch_count;
struct rq *rq;
int cpu;
+ u64 wallclock;
cpu = smp_processor_id();
rq = cpu_rq(cpu);
@@ -3167,15 +3357,22 @@ static void __sched notrace __schedule(bool preempt)
update_rq_clock(rq);
next = pick_next_task(rq, prev);
+ wallclock = sched_ktime_clock();
+ update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+ update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
clear_tsk_need_resched(prev);
clear_preempt_need_resched();
rq->clock_skip_update = 0;
+ BUG_ON(task_cpu(next) != cpu_of(rq));
+
if (likely(prev != next)) {
rq->nr_switches++;
rq->curr = next;
++*switch_count;
+ set_task_last_switch_out(prev, wallclock);
+
trace_sched_switch(preempt, prev, next);
rq = context_switch(rq, prev, next); /* unlocks the rq */
cpu = cpu_of(rq);
@@ -4081,7 +4278,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
{
return _sched_setscheduler(p, policy, param, false);
}
-EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
+EXPORT_SYMBOL(sched_setscheduler_nocheck);
static int
do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
@@ -4958,7 +5155,7 @@ void show_state_filter(unsigned long state_filter)
touch_all_softlockup_watchdogs();
-#ifdef CONFIG_SCHED_DEBUG
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
sysrq_sched_debug_show();
#endif
rcu_read_unlock();
@@ -4987,10 +5184,11 @@ void init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ __sched_fork(0, idle);
+
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
- __sched_fork(0, idle);
idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
@@ -5292,8 +5490,11 @@ static void migrate_tasks(struct rq *dead_rq)
rq = __migrate_task(rq, next, dest_cpu);
if (rq != dead_rq) {
+ raw_spin_unlock(&next->pi_lock);
raw_spin_unlock(&rq->lock);
+ notify_migration(dead_rq->cpu, dest_cpu, true, next);
rq = dead_rq;
+ raw_spin_lock(&next->pi_lock);
raw_spin_lock(&rq->lock);
}
raw_spin_unlock(&next->pi_lock);
@@ -5524,6 +5725,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ set_window_start(rq);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->calc_load_update = calc_load_update;
account_reset_rq(rq);
break;
@@ -5544,6 +5748,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
sched_ttwu_pending();
/* Update our root-domain */
raw_spin_lock_irqsave(&rq->lock, flags);
+ migrate_sync_cpu(cpu);
+
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
@@ -5554,6 +5760,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
break;
case CPU_DEAD:
+ clear_hmp_request(cpu);
calc_load_migrate(rq);
break;
#endif
@@ -6028,6 +6235,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
+ unsigned long next_balance = rq->next_balance;
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
@@ -6059,6 +6267,17 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
sd->child = NULL;
}
+ for (tmp = sd; tmp; ) {
+ unsigned long interval;
+
+ interval = msecs_to_jiffies(tmp->balance_interval);
+ if (time_after(next_balance, tmp->last_balance + interval))
+ next_balance = tmp->last_balance + interval;
+
+ tmp = tmp->parent;
+ }
+ rq->next_balance = next_balance;
+
sched_domain_debug(sd, cpu);
rq_attach_root(rq, rd);
@@ -6950,6 +7169,9 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
pr_err(" the %s domain not a subset of the %s domain\n",
child->name, sd->name);
#endif
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+ BUG();
+#endif
/* Fixup, ensure @sd has at least @child cpus. */
cpumask_or(sched_domain_span(sd),
sched_domain_span(sd),
@@ -7324,6 +7546,8 @@ void __init sched_init_smp(void)
hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
+ update_cluster_topology();
+
init_hrtick();
/* Move init over to a non-isolated CPU */
@@ -7342,6 +7566,7 @@ void __init sched_init_smp(void)
}
#endif /* CONFIG_SMP */
+
int in_sched_functions(unsigned long addr)
{
return in_lock_functions(addr) ||
@@ -7365,6 +7590,15 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
+ if (sched_enable_hmp)
+ pr_info("HMP scheduling enabled.\n");
+
+ BUG_ON(num_possible_cpus() > BITS_PER_LONG);
+
+#ifdef CONFIG_SCHED_HMP
+ init_clusters();
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
@@ -7475,10 +7709,40 @@ void __init sched_init(void)
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
+ rq->push_task = NULL;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
rq->avg_idle = 2*sysctl_sched_migration_cost;
+#ifdef CONFIG_SCHED_HMP
+ cpumask_set_cpu(i, &rq->freq_domain_cpumask);
+ rq->hmp_stats.cumulative_runnable_avg = 0;
+ rq->window_start = 0;
+ rq->hmp_stats.nr_big_tasks = 0;
+ rq->hmp_flags = 0;
+ rq->cur_irqload = 0;
+ rq->avg_irqload = 0;
+ rq->irqload_ts = 0;
+ rq->static_cpu_pwr_cost = 0;
+ rq->cc.cycles = 1;
+ rq->cc.time = 1;
+ rq->cstate = 0;
+ rq->wakeup_latency = 0;
+ rq->wakeup_energy = 0;
+
+ /*
+ * All cpus are part of the same cluster by default. This avoids the
+ * need to check for rq->cluster being non-NULL in hot paths
+ * like select_best_cpu().
+ */
+ rq->cluster = &init_cluster;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ rq->old_busy_time = 0;
+ rq->old_estimated_time = 0;
+ rq->old_busy_time_group = 0;
+ rq->hmp_stats.pred_demands_sum = 0;
+#endif
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
INIT_LIST_HEAD(&rq->cfs_tasks);
@@ -7495,6 +7759,8 @@ void __init sched_init(void)
atomic_set(&rq->nr_iowait, 0);
}
+ set_hmp_defaults();
+
set_load_weight(&init_task);
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -7605,6 +7871,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
pr_cont("\n");
}
#endif
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+ BUG();
+#endif
dump_stack();
}
EXPORT_SYMBOL(___might_sleep);
@@ -8200,7 +8469,7 @@ int sched_rr_handler(struct ctl_table *table, int write,
#ifdef CONFIG_CGROUP_SCHED
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct task_group, css) : NULL;
}
@@ -8564,6 +8833,13 @@ static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
+#ifdef CONFIG_SCHED_HMP
+ {
+ .name = "upmigrate_discourage",
+ .read_u64 = cpu_upmigrate_discourage_read_u64,
+ .write_u64 = cpu_upmigrate_discourage_write_u64,
+ },
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f74ea89e77a8..f29b132a9f8b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -49,6 +49,8 @@ void irqtime_account_irq(struct task_struct *curr)
unsigned long flags;
s64 delta;
int cpu;
+ u64 wallclock;
+ bool account = true;
if (!sched_clock_irqtime)
return;
@@ -56,7 +58,8 @@ void irqtime_account_irq(struct task_struct *curr)
local_irq_save(flags);
cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
+ wallclock = sched_clock_cpu(cpu);
+ delta = wallclock - __this_cpu_read(irq_start_time);
__this_cpu_add(irq_start_time, delta);
irq_time_write_begin();
@@ -70,8 +73,16 @@ void irqtime_account_irq(struct task_struct *curr)
__this_cpu_add(cpu_hardirq_time, delta);
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
__this_cpu_add(cpu_softirq_time, delta);
+ else
+ account = false;
irq_time_write_end();
+
+ if (account)
+ sched_account_irqtime(cpu, curr, delta, wallclock);
+ else if (curr != this_cpu_ksoftirqd())
+ sched_account_irqstart(cpu, curr, wallclock);
+
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 8b0a15e285f9..44178fea87d0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -271,9 +271,11 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
/*
* By now the task is replenished and enqueued; migrate it.
*/
+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, p, 0);
set_task_cpu(p, later_rq->cpu);
activate_task(later_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
if (!fallback)
resched_curr(later_rq);
@@ -851,6 +853,41 @@ static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
@@ -860,6 +897,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
add_nr_running(rq_of_dl_rq(dl_rq), 1);
+ inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -874,6 +912,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+ dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -1555,9 +1594,11 @@ retry:
goto retry;
}
+ next_task->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, later_rq->cpu);
activate_task(later_rq, next_task, 0);
+ next_task->on_rq = TASK_ON_RQ_QUEUED;
ret = 1;
resched_curr(later_rq);
@@ -1643,9 +1684,11 @@ static void pull_dl_task(struct rq *this_rq)
resched = true;
+ p->on_rq = TASK_ON_RQ_MIGRATING;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
dmin = p->dl.deadline;
/* Is there any other task even earlier? */
@@ -1846,6 +1889,11 @@ const struct sched_class dl_sched_class = {
.switched_to = switched_to_dl,
.update_curr = update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_dl,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_dl,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_dl,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 641511771ae6..b6dc131f36a6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -227,6 +227,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
cfs_rq->throttled);
SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
cfs_rq->throttle_count);
+ SEQ_printf(m, " .%-30s: %d\n", "runtime_enabled",
+ cfs_rq->runtime_enabled);
+#ifdef CONFIG_SCHED_HMP
+ SEQ_printf(m, " .%-30s: %d\n", "nr_big_tasks",
+ cfs_rq->hmp_stats.nr_big_tasks);
+ SEQ_printf(m, " .%-30s: %llu\n", "cumulative_runnable_avg",
+ cfs_rq->hmp_stats.cumulative_runnable_avg);
+#endif
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -306,6 +314,23 @@ do { \
P(cpu_load[2]);
P(cpu_load[3]);
P(cpu_load[4]);
+#ifdef CONFIG_SMP
+ P(cpu_capacity);
+#endif
+#ifdef CONFIG_SCHED_HMP
+ P(static_cpu_pwr_cost);
+ P(cluster->static_cluster_pwr_cost);
+ P(cluster->load_scale_factor);
+ P(cluster->capacity);
+ P(cluster->max_possible_capacity);
+ P(cluster->efficiency);
+ P(cluster->cur_freq);
+ P(cluster->max_freq);
+ P(cluster->exec_scale_factor);
+ P(hmp_stats.nr_big_tasks);
+ SEQ_printf(m, " .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
+ rq->hmp_stats.cumulative_runnable_avg);
+#endif
#undef P
#undef PN
@@ -386,6 +411,14 @@ static void sched_debug_header(struct seq_file *m)
PN(sysctl_sched_wakeup_granularity);
P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_HMP
+ P(sched_upmigrate);
+ P(sched_downmigrate);
+ P(sched_init_task_load_windows);
+ P(min_capacity);
+ P(max_capacity);
+ P(sched_ravg_window);
+#endif
#undef PN
#undef P
@@ -408,6 +441,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
return 0;
}
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
void sysrq_sched_debug_show(void)
{
int cpu;
@@ -417,6 +451,7 @@ void sysrq_sched_debug_show(void)
print_cpu(NULL, cpu);
}
+#endif
/*
* This itererator needs some explanation.
@@ -547,6 +582,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
+ unsigned int load_avg;
+
+ load_avg = pct_task_load(p);
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
@@ -598,6 +636,13 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.statistics.nr_wakeups_passive);
P(se.statistics.nr_wakeups_idle);
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+ __P(load_avg);
+#ifdef CONFIG_SCHED_HMP
+ P(ravg.demand);
+#endif
+#endif
+
{
u64 avg_atom, avg_per_cpu;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ba24bfe4ac51..e893b0fcac6b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,9 +31,8 @@
#include <linux/migrate.h>
#include <linux/task_work.h>
-#include <trace/events/sched.h>
-
#include "sched.h"
+#include <trace/events/sched.h>
/*
* Targeted preemption latency for CPU-bound tasks:
@@ -81,6 +80,14 @@ static unsigned int sched_nr_latency = 8;
unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
+ * Controls whether, when SD_SHARE_PKG_RESOURCES is on, all tasks go to
+ * idle CPUs when woken. Note that even when this is off, the per-task
+ * flag PF_WAKE_UP_IDLE can still cause a task to go to an idle CPU upon
+ * being woken.
+ */
+unsigned int __read_mostly sysctl_sched_wake_to_idle;
+
+/*
* SCHED_OTHER wake-up granularity.
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
@@ -236,6 +243,9 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
return mul_u64_u32_shr(delta_exec, fact, shift);
}
+#ifdef CONFIG_SMP
+static int active_load_balance_cpu_stop(void *data);
+#endif
const struct sched_class fair_sched_class;
@@ -738,12 +748,56 @@ static void update_curr_fair(struct rq *rq)
update_curr(cfs_rq_of(&rq->curr->se));
}
+#ifdef CONFIG_SCHEDSTATS
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ u64 wait_start = rq_clock(rq_of(cfs_rq));
+
+ if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
+ likely(wait_start > se->statistics.wait_start))
+ wait_start -= se->statistics.wait_start;
+
+ se->statistics.wait_start = wait_start;
+}
+
+static void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct task_struct *p;
+ u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+
+ if (entity_is_task(se)) {
+ p = task_of(se);
+ if (task_on_rq_migrating(p)) {
+ /*
+ * Preserve the migrating task's wait time so the wait_start
+ * timestamp can be adjusted to accumulate wait time
+ * prior to migration.
+ */
+ se->statistics.wait_start = delta;
+ return;
+ }
+ trace_sched_stat_wait(p, delta);
+ }
+
+ se->statistics.wait_max = max(se->statistics.wait_max, delta);
+ se->statistics.wait_count++;
+ se->statistics.wait_sum += delta;
+ se->statistics.wait_start = 0;
+}
+#else
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+#endif
+
/*
* Task is being enqueued - update stats:
*/
@@ -757,23 +811,6 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_stats_wait_start(cfs_rq, se);
}
-static void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
- schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
- schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
- schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
-#ifdef CONFIG_SCHEDSTATS
- if (entity_is_task(se)) {
- trace_sched_stat_wait(task_of(se),
- rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
- }
-#endif
- schedstat_set(se->statistics.wait_start, 0);
-}
-
static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
@@ -2442,7 +2479,25 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_SMP
-/* Precomputed fixed inverse multiplies for multiplication by y^n */
+u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+ u32 enabled = p->flags & PF_WAKE_UP_IDLE;
+
+ return !!enabled;
+}
+
+int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
+{
+ int enable = !!wake_up_idle;
+
+ if (enable)
+ p->flags |= PF_WAKE_UP_IDLE;
+ else
+ p->flags &= ~PF_WAKE_UP_IDLE;
+
+ return 0;
+}
+
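
A minimal sketch of how an in-kernel user might apply the wake-to-idle hint
through the helpers defined above (the worker task shown is hypothetical):

	static void example_mark_latency_sensitive(struct task_struct *worker)
	{
		/* Prefer idle CPUs when this task is woken */
		if (!sched_get_wake_up_idle(worker))
			sched_set_wake_up_idle(worker, 1);
	}
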
static const u32 runnable_avg_yN_inv[] = {
0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
@@ -2522,6 +2577,968 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}
+#ifdef CONFIG_SCHED_HMP
+
+/* CPU selection flag */
+#define SBC_FLAG_PREV_CPU 0x1
+#define SBC_FLAG_BEST_CAP_CPU 0x2
+#define SBC_FLAG_CPU_COST 0x4
+#define SBC_FLAG_MIN_COST 0x8
+#define SBC_FLAG_IDLE_LEAST_LOADED 0x10
+#define SBC_FLAG_IDLE_CSTATE 0x20
+#define SBC_FLAG_COST_CSTATE_TIE_BREAKER 0x40
+#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER 0x80
+#define SBC_FLAG_CSTATE_LOAD 0x100
+#define SBC_FLAG_BEST_SIBLING 0x200
+
+/* Cluster selection flag */
+#define SBC_FLAG_COLOC_CLUSTER 0x10000
+#define SBC_FLAG_WAKER_CLUSTER 0x20000
+#define SBC_FLAG_BACKUP_CLUSTER 0x40000
+
+struct cpu_select_env {
+ struct task_struct *p;
+ struct related_thread_group *rtg;
+ u8 reason;
+ u8 need_idle:1;
+ u8 need_waker_cluster:1;
+ u8 sync:1;
+ u8 ignore_prev_cpu:1;
+ enum sched_boost_type boost_type;
+ int prev_cpu;
+ DECLARE_BITMAP(candidate_list, NR_CPUS);
+ DECLARE_BITMAP(backup_list, NR_CPUS);
+ u64 task_load;
+ u64 cpu_load;
+ u32 sbc_best_flag;
+ u32 sbc_best_cluster_flag;
+};
+
+struct cluster_cpu_stats {
+ int best_idle_cpu, least_loaded_cpu;
+ int best_capacity_cpu, best_cpu, best_sibling_cpu;
+ int min_cost, best_sibling_cpu_cost;
+ int best_cpu_cstate;
+ u64 min_load, best_load, best_sibling_cpu_load;
+ s64 highest_spare_capacity;
+};
+
+static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
+{
+ u64 total_load;
+
+ total_load = env->task_load + env->cpu_load;
+
+ if (total_load > sched_spill_load ||
+ (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
+ return 1;
+
+ return 0;
+}
+
+static int skip_cpu(int cpu, struct cpu_select_env *env)
+{
+ int tcpu = task_cpu(env->p);
+ int skip = 0;
+
+ if (!env->reason)
+ return 0;
+
+ if (is_reserved(cpu))
+ return 1;
+
+ switch (env->reason) {
+ case UP_MIGRATION:
+ skip = !idle_cpu(cpu);
+ break;
+ case IRQLOAD_MIGRATION:
+ /* Purposely fall through */
+ default:
+ skip = (cpu == tcpu);
+ break;
+ }
+
+ return skip;
+}
+
+static inline int
+acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+ int tcpu;
+
+ if (!env->reason)
+ return 1;
+
+ tcpu = task_cpu(env->p);
+ switch (env->reason) {
+ case UP_MIGRATION:
+ return cluster->capacity > cpu_capacity(tcpu);
+
+ case DOWN_MIGRATION:
+ return cluster->capacity < cpu_capacity(tcpu);
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static int
+skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+ if (!test_bit(cluster->id, env->candidate_list))
+ return 1;
+
+ if (!acceptable_capacity(cluster, env)) {
+ __clear_bit(cluster->id, env->candidate_list);
+ return 1;
+ }
+
+ return 0;
+}
+
+static struct sched_cluster *
+select_least_power_cluster(struct cpu_select_env *env)
+{
+ struct sched_cluster *cluster;
+
+ if (env->rtg) {
+ env->task_load = scale_load_to_cpu(task_load(env->p),
+ cluster_first_cpu(env->rtg->preferred_cluster));
+ env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
+ return env->rtg->preferred_cluster;
+ }
+
+ for_each_sched_cluster(cluster) {
+ if (!skip_cluster(cluster, env)) {
+ int cpu = cluster_first_cpu(cluster);
+
+ env->task_load = scale_load_to_cpu(task_load(env->p),
+ cpu);
+ if (task_load_will_fit(env->p, env->task_load, cpu,
+ env->boost_type))
+ return cluster;
+
+ __set_bit(cluster->id, env->backup_list);
+ __clear_bit(cluster->id, env->candidate_list);
+ }
+ }
+
+ return NULL;
+}
+
+static struct sched_cluster *
+next_candidate(const unsigned long *list, int start, int end)
+{
+ int cluster_id;
+
+ cluster_id = find_next_bit(list, end, start - 1 + 1);
+ if (cluster_id >= end)
+ return NULL;
+
+ return sched_cluster[cluster_id];
+}
+
+static void
+update_spare_capacity(struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env, int cpu, int capacity,
+ u64 cpu_load)
+{
+ s64 spare_capacity = sched_ravg_window - cpu_load;
+
+ if (spare_capacity > 0 &&
+ (spare_capacity > stats->highest_spare_capacity ||
+ (spare_capacity == stats->highest_spare_capacity &&
+ ((!env->need_waker_cluster &&
+ capacity > cpu_capacity(stats->best_capacity_cpu)) ||
+ (env->need_waker_cluster &&
+ cpu_rq(cpu)->nr_running <
+ cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
+ /*
+ * If the sync waker is the only runnable task on its CPU, the CPU's
+ * cr_avg is 0, so there is a high chance of placing the wakee on the
+ * waker's CPU, which would likely preempt the waker. That in turn can
+ * lead to migration of the preempted waker. Where possible, place the
+ * wakee on a genuinely idle CPU instead, by checking nr_running, to
+ * avoid such preemption.
+ */
+ stats->highest_spare_capacity = spare_capacity;
+ stats->best_capacity_cpu = cpu;
+ }
+}
+
+static inline void
+find_backup_cluster(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+ struct sched_cluster *next = NULL;
+ int i;
+
+ while (!bitmap_empty(env->backup_list, num_clusters)) {
+ next = next_candidate(env->backup_list, 0, num_clusters);
+ __clear_bit(next->id, env->backup_list);
+ for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+ trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+ sched_irqload(i), power_cost(i, task_load(env->p) +
+ cpu_cravg_sync(i, env->sync)), 0);
+
+ update_spare_capacity(stats, env, i, next->capacity,
+ cpu_load_sync(i, env->sync));
+ }
+ env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
+ }
+}
+
+struct sched_cluster *
+next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
+ struct cluster_cpu_stats *stats)
+{
+ struct sched_cluster *next = NULL;
+
+ __clear_bit(cluster->id, env->candidate_list);
+
+ if (env->rtg && preferred_cluster(cluster, env->p))
+ return NULL;
+
+ do {
+ if (bitmap_empty(env->candidate_list, num_clusters))
+ return NULL;
+
+ next = next_candidate(env->candidate_list, 0, num_clusters);
+ if (next) {
+ if (next->min_power_cost > stats->min_cost) {
+ clear_bit(next->id, env->candidate_list);
+ next = NULL;
+ continue;
+ }
+
+ if (skip_cluster(next, env))
+ next = NULL;
+ }
+ } while (!next);
+
+ env->task_load = scale_load_to_cpu(task_load(env->p),
+ cluster_first_cpu(next));
+ return next;
+}
+
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env, int cpu_cost)
+{
+ int cpu_cstate;
+ int prev_cpu = env->prev_cpu;
+
+ cpu_cstate = cpu_rq(cpu)->cstate;
+
+ if (env->need_idle) {
+ stats->min_cost = cpu_cost;
+ if (idle_cpu(cpu)) {
+ if (cpu_cstate < stats->best_cpu_cstate ||
+ (cpu_cstate == stats->best_cpu_cstate &&
+ cpu == prev_cpu)) {
+ stats->best_idle_cpu = cpu;
+ stats->best_cpu_cstate = cpu_cstate;
+ }
+ } else {
+ if (env->cpu_load < stats->min_load ||
+ (env->cpu_load == stats->min_load &&
+ cpu == prev_cpu)) {
+ stats->least_loaded_cpu = cpu;
+ stats->min_load = env->cpu_load;
+ }
+ }
+
+ return;
+ }
+
+ if (cpu_cost < stats->min_cost) {
+ stats->min_cost = cpu_cost;
+ stats->best_cpu_cstate = cpu_cstate;
+ stats->best_load = env->cpu_load;
+ stats->best_cpu = cpu;
+ env->sbc_best_flag = SBC_FLAG_CPU_COST;
+ return;
+ }
+
+ /* CPU cost is the same. Start breaking the tie by C-state */
+
+ if (cpu_cstate > stats->best_cpu_cstate)
+ return;
+
+ if (cpu_cstate < stats->best_cpu_cstate) {
+ stats->best_cpu_cstate = cpu_cstate;
+ stats->best_load = env->cpu_load;
+ stats->best_cpu = cpu;
+ env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
+ return;
+ }
+
+ /* C-state is the same. Use prev CPU to break the tie */
+ if (cpu == prev_cpu) {
+ stats->best_cpu = cpu;
+ env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
+ return;
+ }
+
+ if (stats->best_cpu != prev_cpu &&
+ ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
+ (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+ stats->best_load = env->cpu_load;
+ stats->best_cpu = cpu;
+ env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
+ }
+}
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env, int cpu_cost)
+{
+ int prev_cpu = env->prev_cpu;
+
+ if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+ if (stats->best_sibling_cpu_cost > cpu_cost ||
+ (stats->best_sibling_cpu_cost == cpu_cost &&
+ stats->best_sibling_cpu_load > env->cpu_load)) {
+ stats->best_sibling_cpu_cost = cpu_cost;
+ stats->best_sibling_cpu_load = env->cpu_load;
+ stats->best_sibling_cpu = cpu;
+ }
+ }
+
+ if ((cpu_cost < stats->min_cost) ||
+ ((stats->best_cpu != prev_cpu &&
+ stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+ if (env->need_idle) {
+ if (idle_cpu(cpu)) {
+ stats->min_cost = cpu_cost;
+ stats->best_idle_cpu = cpu;
+ }
+ } else {
+ stats->min_cost = cpu_cost;
+ stats->min_load = env->cpu_load;
+ stats->best_cpu = cpu;
+ env->sbc_best_flag = SBC_FLAG_MIN_COST;
+ }
+ }
+}
+#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+ struct cpu_select_env *env)
+{
+ int cpu_cost;
+
+ cpu_cost = power_cost(cpu, task_load(env->p) +
+ cpu_cravg_sync(cpu, env->sync));
+ if (cpu_cost <= stats->min_cost)
+ __update_cluster_stats(cpu, stats, env, cpu_cost);
+}
+
+static void find_best_cpu_in_cluster(struct sched_cluster *c,
+ struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+ int i;
+ struct cpumask search_cpus;
+
+ cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
+ if (env->ignore_prev_cpu)
+ cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+
+ for_each_cpu(i, &search_cpus) {
+ env->cpu_load = cpu_load_sync(i, env->sync);
+
+ trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+ sched_irqload(i),
+ power_cost(i, task_load(env->p) +
+ cpu_cravg_sync(i, env->sync)), 0);
+
+ if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+ continue;
+
+ update_spare_capacity(stats, env, i, c->capacity,
+ env->cpu_load);
+
+ if (env->boost_type == SCHED_BOOST_ON_ALL ||
+ env->need_waker_cluster ||
+ sched_cpu_high_irqload(i) ||
+ spill_threshold_crossed(env, cpu_rq(i)))
+ continue;
+
+ update_cluster_stats(i, stats, env);
+ }
+}
+
+static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
+{
+ stats->best_cpu = stats->best_idle_cpu = -1;
+ stats->best_capacity_cpu = stats->best_sibling_cpu = -1;
+ stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
+ stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
+ stats->highest_spare_capacity = 0;
+ stats->least_loaded_cpu = -1;
+ stats->best_cpu_cstate = INT_MAX;
+ /* No need to initialize stats->best_load */
+}
+
+/*
+ * Should the task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications for both performance and
+ * power. In many cases the scheduler cannot correctly estimate the impact of
+ * using idle cpus on either performance or power. PF_WAKE_UP_IDLE allows an
+ * external kernel module to pass a strong hint to the scheduler that the task
+ * in question should be woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+ return (current->flags & PF_WAKE_UP_IDLE) ||
+ (p->flags & PF_WAKE_UP_IDLE) || sysctl_sched_wake_to_idle;
+}
+
+static inline bool
+bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+ int prev_cpu;
+ struct task_struct *task = env->p;
+ struct sched_cluster *cluster;
+
+ if (env->boost_type != SCHED_BOOST_NONE || env->reason ||
+ !task->ravg.mark_start ||
+ env->need_idle || !sched_short_sleep_task_threshold)
+ return false;
+
+ prev_cpu = env->prev_cpu;
+ if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
+ unlikely(!cpu_active(prev_cpu)))
+ return false;
+
+ if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+ sched_long_cpu_selection_threshold)
+ return false;
+
+ /*
+ * This function should be used by the task wakeup path only, as it
+ * assumes p->last_switch_out_ts is the last sleep time.
+ * p->last_switch_out_ts can denote the last preemption time as well as
+ * the last sleep time.
+ */
+ if (task->ravg.mark_start - task->last_switch_out_ts >=
+ sched_short_sleep_task_threshold)
+ return false;
+
+ env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
+ cluster = cpu_rq(prev_cpu)->cluster;
+
+ if (!task_load_will_fit(task, env->task_load, prev_cpu,
+ sched_boost_type())) {
+
+ __set_bit(cluster->id, env->backup_list);
+ __clear_bit(cluster->id, env->candidate_list);
+ return false;
+ }
+
+ env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
+ if (sched_cpu_high_irqload(prev_cpu) ||
+ spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
+ update_spare_capacity(stats, env, prev_cpu,
+ cluster->capacity, env->cpu_load);
+ env->ignore_prev_cpu = 1;
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool
+wake_to_waker_cluster(struct cpu_select_env *env)
+{
+ return !env->need_idle && !env->reason && env->sync &&
+ task_load(current) > sched_big_waker_task_load &&
+ task_load(env->p) < sched_small_wakee_task_load;
+}
+
+static inline int
+cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+{
+ cpumask_t tmp_mask;
+
+ cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
+ cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
+
+ return !cpumask_empty(&tmp_mask);
+}
+
+
+/* return cheapest cpu that can fit this task */
+static int select_best_cpu(struct task_struct *p, int target, int reason,
+ int sync)
+{
+ struct sched_cluster *cluster, *pref_cluster = NULL;
+ struct cluster_cpu_stats stats;
+ struct related_thread_group *grp;
+ unsigned int sbc_flag = 0;
+
+ struct cpu_select_env env = {
+ .p = p,
+ .reason = reason,
+ .need_idle = wake_to_idle(p),
+ .need_waker_cluster = 0,
+ .boost_type = sched_boost_type(),
+ .sync = sync,
+ .prev_cpu = target,
+ .ignore_prev_cpu = 0,
+ .rtg = NULL,
+ .sbc_best_flag = 0,
+ .sbc_best_cluster_flag = 0,
+ };
+
+ bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
+ bitmap_zero(env.backup_list, NR_CPUS);
+
+ init_cluster_cpu_stats(&stats);
+
+ rcu_read_lock();
+
+ grp = task_related_thread_group(p);
+
+ if (grp && grp->preferred_cluster) {
+ pref_cluster = grp->preferred_cluster;
+ if (!cluster_allowed(p, pref_cluster))
+ clear_bit(pref_cluster->id, env.candidate_list);
+ else
+ env.rtg = grp;
+ } else {
+ cluster = cpu_rq(smp_processor_id())->cluster;
+ if (wake_to_waker_cluster(&env) &&
+ cluster_allowed(p, cluster)) {
+ env.need_waker_cluster = 1;
+ bitmap_zero(env.candidate_list, NR_CPUS);
+ __set_bit(cluster->id, env.candidate_list);
+ env.sbc_best_cluster_flag = SBC_FLAG_WAKER_CLUSTER;
+
+ } else if (bias_to_prev_cpu(&env, &stats)) {
+ sbc_flag = SBC_FLAG_PREV_CPU;
+ goto out;
+ }
+ }
+
+retry:
+ cluster = select_least_power_cluster(&env);
+
+ if (!cluster)
+ goto out;
+
+ /*
+ * 'cluster' now points to the minimum-power cluster which can satisfy the
+ * task's perf goals. Walk down the cluster list starting with that
+ * cluster. For non-small tasks, skip clusters that don't have
+ * mostly_idle/idle cpus.
+ */
+
+ do {
+ find_best_cpu_in_cluster(cluster, &env, &stats);
+
+ } while ((cluster = next_best_cluster(cluster, &env, &stats)));
+
+ if (env.need_idle) {
+ if (stats.best_idle_cpu >= 0) {
+ target = stats.best_idle_cpu;
+ sbc_flag |= SBC_FLAG_IDLE_CSTATE;
+ } else if (stats.least_loaded_cpu >= 0) {
+ target = stats.least_loaded_cpu;
+ sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
+ }
+ } else if (stats.best_cpu >= 0) {
+ if (stats.best_cpu != task_cpu(p) &&
+ stats.min_cost == stats.best_sibling_cpu_cost) {
+ stats.best_cpu = stats.best_sibling_cpu;
+ sbc_flag |= SBC_FLAG_BEST_SIBLING;
+ }
+ sbc_flag |= env.sbc_best_flag;
+ target = stats.best_cpu;
+ } else {
+ if (env.rtg) {
+ env.rtg = NULL;
+ goto retry;
+ }
+
+ find_backup_cluster(&env, &stats);
+ if (stats.best_capacity_cpu >= 0) {
+ target = stats.best_capacity_cpu;
+ sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
+ }
+ }
+ p->last_cpu_selected_ts = sched_ktime_clock();
+ sbc_flag |= env.sbc_best_cluster_flag;
+out:
+ rcu_read_unlock();
+ trace_sched_task_load(p, sched_boost(), env.reason, env.sync,
+ env.need_idle, sbc_flag, target);
+ return target;
+}
+
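
Illustrative only: select_best_cpu() is static and is wired into the fair-class
wakeup path elsewhere in this file (not shown in this excerpt). A hedged sketch
of the intended call shape, with a hypothetical wrapper name:

	static int example_select_cpu_hmp(struct task_struct *p, int prev_cpu,
					  int sync)
	{
		if (sched_enable_hmp)
			return select_best_cpu(p, prev_cpu, 0 /* no reason */, sync);
		return prev_cpu;
	}
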
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+ tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+ return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu) \
+ for (tg = container_of(&task_groups, struct task_group, list); \
+ ((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+ struct task_group *tg;
+ struct cfs_rq *cfs_rq;
+
+ rcu_read_lock();
+
+ for_each_cfs_rq(cfs_rq, tg, cpu)
+ reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+ rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra);
+
+/* Add a task's contribution to a cpu's HMP statistics */
+void _inc_hmp_sched_stats_fair(struct rq *rq,
+ struct task_struct *p, int change_cra)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+
+ /*
+ * Although the check below is not strictly required (inc/dec_nr_big_task
+ * and inc/dec_cumulative_runnable_avg, called from inc_cfs_rq_hmp_stats(),
+ * have similar checks), we gain a bit of efficiency by short-circuiting
+ * the for_each_sched_entity() loop when
+ * !sched_enable_hmp || sched_disable_window_stats
+ */
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se)
+ inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+/* Remove a task's contribution from a cpu's HMP statistics */
+static void
+_dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+
+ /* See comment on efficiency in _inc_hmp_sched_stats_fair */
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ dec_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se)
+ dec_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ _inc_hmp_sched_stats_fair(rq, p, 1);
+}
+
+static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ _dec_hmp_sched_stats_fair(rq, p, 1);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ struct cfs_rq *cfs_rq;
+ struct sched_entity *se = &p->se;
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+ task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+ }
+
+ /* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+ if (!se) {
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+ task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+ }
+}
+
+static int task_will_be_throttled(struct task_struct *p);
+
+#else /* CONFIG_CFS_BANDWIDTH */
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ inc_nr_big_task(&rq->hmp_stats, p);
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+ dec_nr_big_task(&rq->hmp_stats, p);
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+ fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+ return 0;
+}
+
+void _inc_hmp_sched_stats_fair(struct rq *rq,
+ struct task_struct *p, int change_cra)
+{
+ inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
+/*
+ * Reset balance_interval at all sched_domain levels of given cpu, so that it
+ * honors kick.
+ */
+static inline void reset_balance_interval(int cpu)
+{
+ struct sched_domain *sd;
+
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ rcu_read_lock();
+ for_each_domain(cpu, sd)
+ sd->balance_interval = 0;
+ rcu_read_unlock();
+}
+
+/*
+ * Check if a task is on the "wrong" cpu (i.e. its current cpu is not the ideal
+ * cpu as per its demand or priority)
+ *
+ * Returns reason why task needs to be migrated
+ */
+static inline int migration_needed(struct task_struct *p, int cpu)
+{
+ int nice;
+ struct related_thread_group *grp;
+
+ if (!sched_enable_hmp || p->state != TASK_RUNNING ||
+ p->nr_cpus_allowed == 1)
+ return 0;
+
+ /* No need to migrate task that is about to be throttled */
+ if (task_will_be_throttled(p))
+ return 0;
+
+ if (sched_boost_type() == SCHED_BOOST_ON_BIG) {
+ if (cpu_capacity(cpu) != max_capacity)
+ return UP_MIGRATION;
+ return 0;
+ }
+
+ if (sched_cpu_high_irqload(cpu))
+ return IRQLOAD_MIGRATION;
+
+ nice = task_nice(p);
+ rcu_read_lock();
+ grp = task_related_thread_group(p);
+ if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
+ upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+ rcu_read_unlock();
+ return DOWN_MIGRATION;
+ }
+
+ if (!grp && !task_will_fit(p, cpu)) {
+ rcu_read_unlock();
+ return UP_MIGRATION;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+/*
+ * Check if currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+ int cpu = cpu_of(rq), new_cpu;
+ int active_balance = 0, reason;
+
+ reason = migration_needed(p, cpu);
+ if (!reason)
+ return;
+
+ raw_spin_lock(&migration_lock);
+ new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+ if (new_cpu != cpu) {
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance)
+ mark_reserved(new_cpu);
+ }
+
+ raw_spin_unlock(&migration_lock);
+
+ if (active_balance)
+ stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->hmp_stats.nr_big_tasks = 0;
+ cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
+ cfs_rq->hmp_stats.pred_demands_sum = 0;
+}
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra)
+{
+ inc_nr_big_task(&cfs_rq->hmp_stats, p);
+ if (change_cra)
+ inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra)
+{
+ dec_nr_big_task(&cfs_rq->hmp_stats, p);
+ if (change_cra)
+ dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+ stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
+ stats->cumulative_runnable_avg +=
+ cfs_rq->hmp_stats.cumulative_runnable_avg;
+ stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
+}
+
+static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+ stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
+ stats->cumulative_runnable_avg -=
+ cfs_rq->hmp_stats.cumulative_runnable_avg;
+ stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
+
+ BUG_ON(stats->nr_big_tasks < 0 ||
+ (s64)stats->cumulative_runnable_avg < 0);
+ verify_pred_demands_sum(stats);
+}
+
+#else /* CONFIG_CFS_BANDWIDTH */
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+#endif /* CONFIG_CFS_BANDWIDTH */
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+ struct cfs_rq *cfs_rq)
+{
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
#if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
#error "load tracking assumes 2^10 as unit"
#endif
@@ -2644,6 +3661,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
if (cfs_rq)
cfs_rq->runnable_load_sum += weight * scaled_delta;
}
+
if (running)
sa->util_sum += scaled_delta * scale_cpu;
@@ -2884,6 +3902,12 @@ static inline int idle_balance(struct rq *rq)
return 0;
}
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+ struct task_struct *p, int change_cra) { }
+
#endif /* CONFIG_SMP */
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3499,6 +4523,33 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
return cfs_bandwidth_used() && cfs_rq->throttled;
}
+/*
+ * Check if task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick()
+ */
+static int task_will_be_throttled(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq;
+
+ if (!cfs_bandwidth_used())
+ return 0;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ if (!cfs_rq->runtime_enabled)
+ continue;
+ if (cfs_rq->runtime_remaining <= 0)
+ return 1;
+ }
+
+ return 0;
+}
+
/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
@@ -3578,13 +4629,20 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
if (dequeue)
dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
qcfs_rq->h_nr_running -= task_delta;
+#ifdef CONFIG_SCHED_HMP
+ dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
+#endif
if (qcfs_rq->load.weight)
dequeue = 0;
}
- if (!se)
+ if (!se) {
sub_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
+ dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+#endif
+ }
cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
@@ -3605,6 +4663,12 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
start_cfs_bandwidth(cfs_b);
raw_spin_unlock(&cfs_b->lock);
+
+ /* Log effect on hmp stats after throttling */
+ trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+ sched_irqload(cpu_of(rq)),
+ power_cost(cpu_of(rq), 0),
+ cpu_temp(cpu_of(rq)));
}
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -3614,6 +4678,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
struct sched_entity *se;
int enqueue = 1;
long task_delta;
+ struct cfs_rq *tcfs_rq = cfs_rq;
se = cfs_rq->tg->se[cpu_of(rq)];
@@ -3641,17 +4706,30 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
if (enqueue)
enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
cfs_rq->h_nr_running += task_delta;
+#ifdef CONFIG_SCHED_HMP
+ inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
+#endif
if (cfs_rq_throttled(cfs_rq))
break;
}
- if (!se)
+ if (!se) {
add_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
+ inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+#endif
+ }
/* determine whether we need to wake up potentially idle cpu */
if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq);
+
+ /* Log effect on hmp stats after un-throttling */
+ trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+ sched_irqload(cpu_of(rq)),
+ power_cost(cpu_of(rq), 0),
+ cpu_temp(cpu_of(rq)));
}
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -3972,6 +5050,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
+ init_cfs_rq_hmp_stats(cfs_rq);
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4087,7 +5166,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
WARN_ON(task_rq(p) != rq);
- if (cfs_rq->nr_running > 1) {
+ if (rq->cfs.h_nr_running > 1) {
u64 slice = sched_slice(cfs_rq, se);
u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
s64 delta = slice - ran;
@@ -4103,8 +5182,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
/*
* called from enqueue/dequeue and updates the hrtick when the
- * current task is from our class and nr_running is low enough
- * to matter.
+ * current task is from our class.
*/
static void hrtick_update(struct rq *rq)
{
@@ -4113,8 +5191,7 @@ static void hrtick_update(struct rq *rq)
if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
return;
- if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
- hrtick_start_fair(rq, curr);
+ hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
@@ -4153,6 +5230,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running++;
+ inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
flags = ENQUEUE_WAKEUP;
}
@@ -4160,6 +5238,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running++;
+ inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4168,9 +5247,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_shares(cfs_rq);
}
- if (!se)
+ if (!se) {
add_nr_running(rq, 1);
-
+ inc_rq_hmp_stats(rq, p, 1);
+ }
hrtick_update(rq);
}
@@ -4200,6 +5280,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
cfs_rq->h_nr_running--;
+ dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
@@ -4220,6 +5301,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
+ dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
if (cfs_rq_throttled(cfs_rq))
break;
@@ -4228,9 +5310,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_shares(cfs_rq);
}
- if (!se)
+ if (!se) {
sub_nr_running(rq, 1);
-
+ dec_rq_hmp_stats(rq, p, 1);
+ }
hrtick_update(rq);
}
@@ -4850,6 +5933,11 @@ static int select_idle_sibling(struct task_struct *p, int target)
if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
return i;
+ if (!sysctl_sched_wake_to_idle &&
+ !(current->flags & PF_WAKE_UP_IDLE) &&
+ !(p->flags & PF_WAKE_UP_IDLE))
+ return target;
+
/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
*/
@@ -4932,6 +6020,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
int want_affine = 0;
int sync = wake_flags & WF_SYNC;
+ if (sched_enable_hmp)
+ return select_best_cpu(p, prev_cpu, 0, sync);
+
if (sd_flag & SD_BALANCE_WAKE)
want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
@@ -5519,6 +6610,13 @@ enum fbq_type { regular, remote, all };
#define LBF_NEED_BREAK 0x02
#define LBF_DST_PINNED 0x04
#define LBF_SOME_PINNED 0x08
+#define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x40
+#define LBF_BIG_TASK_ACTIVE_BALANCE 0x80
+#define LBF_HMP_ACTIVE_BALANCE (LBF_SCHED_BOOST_ACTIVE_BALANCE | \
+ LBF_BIG_TASK_ACTIVE_BALANCE)
+#define LBF_IGNORE_BIG_TASKS 0x100
+#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
+#define LBF_MOVED_RELATED_THREAD_GROUP_TASK 0x400
struct lb_env {
struct sched_domain *sd;
@@ -5535,6 +6633,8 @@ struct lb_env {
long imbalance;
/* The set of CPUs under consideration for load-balancing */
struct cpumask *cpus;
+ unsigned int busiest_grp_capacity;
+ unsigned int busiest_nr_running;
unsigned int flags;
@@ -5641,6 +6741,7 @@ static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
int tsk_cache_hot;
+ int twf, group_cpus;
lockdep_assert_held(&env->src_rq->lock);
@@ -5687,6 +6788,34 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	/* Record that we found at least one task that could run on dst_cpu */
env->flags &= ~LBF_ALL_PINNED;
+ if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu) &&
+ nr_big_tasks(env->src_rq) && !is_big_task(p))
+ return 0;
+
+ twf = task_will_fit(p, env->dst_cpu);
+
+ /*
+ * Attempt to not pull tasks that don't fit. We may get lucky and find
+ * one that actually fits.
+ */
+ if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
+ return 0;
+
+ if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+ !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p))
+ return 0;
+
+ /*
+ * Group imbalance can sometimes cause work to be pulled across groups
+ * even though the group could have managed the imbalance on its own.
+ * Prevent inter-cluster migrations for big tasks when the number of
+ * tasks is lower than the capacity of the group.
+ */
+ group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
+ SCHED_CAPACITY_SCALE);
+ if (!twf && env->busiest_nr_running <= group_cpus)
+ return 0;
+
if (task_running(env->src_rq, p)) {
schedstat_inc(p, se.statistics.nr_failed_migrations_running);
return 0;
@@ -5694,15 +6823,16 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
/*
* Aggressive migration if:
- * 1) destination numa is preferred
- * 2) task is cache cold, or
- * 3) too many balance attempts have failed.
+ * 1) IDLE or NEWLY_IDLE balance.
+ * 2) destination numa is preferred
+ * 3) task is cache cold, or
+ * 4) too many balance attempts have failed.
*/
tsk_cache_hot = migrate_degrades_locality(p, env);
if (tsk_cache_hot == -1)
tsk_cache_hot = task_hot(p, env);
- if (tsk_cache_hot <= 0 ||
+ if (env->idle != CPU_NOT_IDLE || tsk_cache_hot <= 0 ||
env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
if (tsk_cache_hot == 1) {
schedstat_inc(env->sd, lb_hot_gained[env->idle]);
@@ -5722,9 +6852,13 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
{
lockdep_assert_held(&env->src_rq->lock);
- deactivate_task(env->src_rq, p, 0);
p->on_rq = TASK_ON_RQ_MIGRATING;
+ deactivate_task(env->src_rq, p, 0);
+ double_lock_balance(env->src_rq, env->dst_rq);
set_task_cpu(p, env->dst_cpu);
+ if (task_in_related_thread_group(p))
+ env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK;
+ double_unlock_balance(env->src_rq, env->dst_rq);
}
/*
@@ -5752,6 +6886,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
* inside detach_tasks().
*/
schedstat_inc(env->sd, lb_gained[env->idle]);
+
return p;
}
return NULL;
@@ -5771,12 +6906,20 @@ static int detach_tasks(struct lb_env *env)
struct task_struct *p;
unsigned long load;
int detached = 0;
+ int orig_loop = env->loop;
lockdep_assert_held(&env->src_rq->lock);
if (env->imbalance <= 0)
return 0;
+ if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu) &&
+ !sched_boost())
+ env->flags |= LBF_IGNORE_BIG_TASKS;
+ else if (!same_cluster(env->dst_cpu, env->src_cpu))
+ env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
+
+redo:
while (!list_empty(tasks)) {
/*
* We don't want to steal all, otherwise we may be treated likewise,
@@ -5838,6 +6981,15 @@ next:
list_move_tail(&p->se.group_node, tasks);
}
+ if (env->flags & (LBF_IGNORE_BIG_TASKS |
+ LBF_IGNORE_PREFERRED_CLUSTER_TASKS) && !detached) {
+ tasks = &env->src_rq->cfs_tasks;
+ env->flags &= ~(LBF_IGNORE_BIG_TASKS |
+ LBF_IGNORE_PREFERRED_CLUSTER_TASKS);
+ env->loop = orig_loop;
+ goto redo;
+ }
+
/*
* Right now, this is one of only two places we collect this stat
* so we can safely collect detach_one_task() stats here rather
@@ -5856,8 +7008,8 @@ static void attach_task(struct rq *rq, struct task_struct *p)
lockdep_assert_held(&rq->lock);
BUG_ON(task_rq(p) != rq);
- p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(rq, p, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
check_preempt_curr(rq, p, 0);
}
@@ -6002,6 +7154,10 @@ struct sg_lb_stats {
unsigned long group_capacity;
unsigned long group_util; /* Total utilization of the group */
unsigned int sum_nr_running; /* Nr tasks running in the group */
+#ifdef CONFIG_SCHED_HMP
+ unsigned long sum_nr_big_tasks;
+ u64 group_cpu_load; /* Scaled load of all CPUs of the group */
+#endif
unsigned int idle_cpus;
unsigned int group_weight;
enum group_type group_type;
@@ -6048,6 +7204,56 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
};
}
+#ifdef CONFIG_SCHED_HMP
+
+static int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+ int local_cpu, busiest_cpu;
+ int local_capacity, busiest_capacity;
+ int local_pwr_cost, busiest_pwr_cost;
+ int nr_cpus;
+
+ if (!sysctl_sched_restrict_cluster_spill || sched_boost())
+ return 0;
+
+ local_cpu = group_first_cpu(sds->local);
+ busiest_cpu = group_first_cpu(sds->busiest);
+
+ local_capacity = cpu_max_possible_capacity(local_cpu);
+ busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
+
+ local_pwr_cost = cpu_max_power_cost(local_cpu);
+ busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
+
+ if (local_capacity < busiest_capacity ||
+ (local_capacity == busiest_capacity &&
+ local_pwr_cost <= busiest_pwr_cost))
+ return 0;
+
+ if (local_capacity > busiest_capacity &&
+ sds->busiest_stat.sum_nr_big_tasks)
+ return 0;
+
+ nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
+ if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
+ (sds->busiest_stat.sum_nr_running <
+ nr_cpus * sysctl_sched_spill_nr_run))
+ return 1;
+
+ return 0;
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
/**
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
@@ -6277,7 +7483,7 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
static inline enum
group_type group_classify(struct sched_group *group,
- struct sg_lb_stats *sgs)
+ struct sg_lb_stats *sgs, struct lb_env *env)
{
if (sgs->group_no_capacity)
return group_overloaded;
@@ -6310,6 +7516,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
+ trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
+ sched_irqload(i),
+ power_cost(i, 0),
+ cpu_temp(i));
+
/* Bias balancing toward cpus of our domain */
if (local_group)
load = target_load(i, load_idx);
@@ -6323,6 +7534,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (rq->nr_running > 1)
*overload = true;
+#ifdef CONFIG_SCHED_HMP
+ sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
+ sgs->group_cpu_load += cpu_load(i);
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -6342,9 +7558,41 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->group_weight = group->group_weight;
sgs->group_no_capacity = group_is_overloaded(env, sgs);
- sgs->group_type = group_classify(group, sgs);
+ sgs->group_type = group_classify(group, sgs, env);
}
+#ifdef CONFIG_SCHED_HMP
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+ struct sd_lb_stats *sds,
+ struct sched_group *sg,
+ struct sg_lb_stats *sgs)
+{
+ if (env->idle != CPU_NOT_IDLE &&
+ cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
+ if (sched_boost() && !sds->busiest && sgs->sum_nr_running) {
+ env->flags |= LBF_SCHED_BOOST_ACTIVE_BALANCE;
+ return true;
+ }
+
+ if (sgs->sum_nr_big_tasks >
+ sds->busiest_stat.sum_nr_big_tasks) {
+ env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
+ return true;
+ }
+ }
+
+ return false;
+}
+#else
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+ struct sd_lb_stats *sds,
+ struct sched_group *sg,
+ struct sg_lb_stats *sgs)
+{
+ return false;
+}
+#endif
+
/**
* update_sd_pick_busiest - return 1 on busiest group
* @env: The load balancing environment.
@@ -6365,6 +7613,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
{
struct sg_lb_stats *busiest = &sds->busiest_stat;
+ if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
+ return true;
+
if (sgs->group_type > busiest->group_type)
return true;
@@ -6476,12 +7727,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
group_has_capacity(env, &sds->local_stat) &&
(sgs->sum_nr_running > 1)) {
sgs->group_no_capacity = 1;
- sgs->group_type = group_classify(sg, sgs);
+ sgs->group_type = group_classify(sg, sgs, env);
}
if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
sds->busiest_stat = *sgs;
+ env->busiest_nr_running = sgs->sum_nr_running;
+ env->busiest_grp_capacity = sgs->group_capacity;
}
next_group:
@@ -6733,6 +7986,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (!sds.busiest || busiest->sum_nr_running == 0)
goto out_balanced;
+ if (env->flags & LBF_HMP_ACTIVE_BALANCE)
+ goto force_balance;
+
+ if (bail_inter_cluster_balance(env, &sds))
+ goto out_balanced;
+
sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
/ sds.total_capacity;
@@ -6794,6 +8053,57 @@ out_balanced:
return NULL;
}
+#ifdef CONFIG_SCHED_HMP
+static struct rq *find_busiest_queue_hmp(struct lb_env *env,
+ struct sched_group *group)
+{
+ struct rq *busiest = NULL, *busiest_big = NULL;
+ u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
+ int max_nr_big = 0, nr_big;
+ bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
+ int i;
+
+ for_each_cpu(i, sched_group_cpus(group)) {
+ struct rq *rq = cpu_rq(i);
+ u64 cumulative_runnable_avg =
+ rq->hmp_stats.cumulative_runnable_avg;
+
+ if (!cpumask_test_cpu(i, env->cpus))
+ continue;
+
+ if (find_big) {
+ nr_big = nr_big_tasks(rq);
+ if (nr_big > max_nr_big ||
+ (nr_big > 0 && nr_big == max_nr_big &&
+ cumulative_runnable_avg > max_runnable_avg_big)) {
+ max_runnable_avg_big = cumulative_runnable_avg;
+ busiest_big = rq;
+ max_nr_big = nr_big;
+ continue;
+ }
+ }
+
+ if (cumulative_runnable_avg > max_runnable_avg) {
+ max_runnable_avg = cumulative_runnable_avg;
+ busiest = rq;
+ }
+ }
+
+ if (busiest_big)
+ return busiest_big;
+
+ env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
+ return busiest;
+}
+#else
+static inline struct rq *find_busiest_queue_hmp(struct lb_env *env,
+ struct sched_group *group)
+{
+ return NULL;
+}
+#endif
+
/*
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
@@ -6804,6 +8114,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
+ if (sched_enable_hmp)
+ return find_busiest_queue_hmp(env, group);
+
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long capacity, wl;
enum fbq_type rt;
@@ -6871,15 +8184,20 @@ static struct rq *find_busiest_queue(struct lb_env *env,
* Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
* so long as it is large enough.
*/
-#define MAX_PINNED_INTERVAL 512
+#define MAX_PINNED_INTERVAL 16
/* Working cpumask for load_balance and load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
+#define NEED_ACTIVE_BALANCE_THRESHOLD 10
+
static int need_active_balance(struct lb_env *env)
{
struct sched_domain *sd = env->sd;
+ if (env->flags & LBF_HMP_ACTIVE_BALANCE)
+ return 1;
+
if (env->idle == CPU_NEWLY_IDLE) {
/*
@@ -6904,11 +8222,10 @@ static int need_active_balance(struct lb_env *env)
return 1;
}
- return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+ return unlikely(sd->nr_balance_failed >
+ sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
}
-static int active_load_balance_cpu_stop(void *data);
-
static int should_we_balance(struct lb_env *env)
{
struct sched_group *sg = env->sd->groups;
@@ -6951,10 +8268,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
int *continue_balancing)
{
- int ld_moved, cur_ld_moved, active_balance = 0;
+ int ld_moved = 0, cur_ld_moved, active_balance = 0;
struct sched_domain *sd_parent = sd->parent;
- struct sched_group *group;
- struct rq *busiest;
+ struct sched_group *group = NULL;
+ struct rq *busiest = NULL;
unsigned long flags;
struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
@@ -6968,6 +8285,11 @@ static int load_balance(int this_cpu, struct rq *this_rq,
.cpus = cpus,
.fbq_type = all,
.tasks = LIST_HEAD_INIT(env.tasks),
+ .imbalance = 0,
+ .flags = 0,
+ .loop = 0,
+ .busiest_nr_running = 0,
+ .busiest_grp_capacity = 0,
};
/*
@@ -7020,6 +8342,13 @@ redo:
more_balance:
raw_spin_lock_irqsave(&busiest->lock, flags);
+ /* The world might have changed. Validate assumptions */
+ if (busiest->nr_running <= 1) {
+ raw_spin_unlock_irqrestore(&busiest->lock, flags);
+ env.flags &= ~LBF_ALL_PINNED;
+ goto no_move;
+ }
+
/*
* cur_ld_moved - load moved in current iteration
* ld_moved - cumulative load moved across iterations
@@ -7107,15 +8436,19 @@ more_balance:
}
}
+no_move:
if (!ld_moved) {
- schedstat_inc(sd, lb_failed[idle]);
+ if (!(env.flags & LBF_HMP_ACTIVE_BALANCE))
+ schedstat_inc(sd, lb_failed[idle]);
+
/*
* Increment the failure counter only on periodic balance.
* We do not want newidle balance, which can be very
* frequent, pollute the failure counter causing
* excessive cache_hot migrations and active balances.
*/
- if (idle != CPU_NEWLY_IDLE)
+ if (idle != CPU_NEWLY_IDLE &&
+ !(env.flags & LBF_HMP_ACTIVE_BALANCE))
sd->nr_balance_failed++;
if (need_active_balance(&env)) {
@@ -7149,17 +8482,31 @@ more_balance:
stop_one_cpu_nowait(cpu_of(busiest),
active_load_balance_cpu_stop, busiest,
&busiest->active_balance_work);
+ *continue_balancing = 0;
}
/*
* We've kicked active balancing, reset the failure
* counter.
*/
- sd->nr_balance_failed = sd->cache_nice_tries+1;
+ sd->nr_balance_failed =
+ sd->cache_nice_tries +
+ NEED_ACTIVE_BALANCE_THRESHOLD - 1;
}
- } else
+ } else {
sd->nr_balance_failed = 0;
+ /* Assumes one 'busiest' cpu that we pulled tasks from */
+ if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
+ int check_groups = !!(env.flags &
+ LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+
+ check_for_freq_change(this_rq, false, check_groups);
+ check_for_freq_change(busiest, false, check_groups);
+ } else {
+ check_for_freq_change(this_rq, true, false);
+ }
+ }
if (likely(!active_balance)) {
/* We were unbalanced, so reset the balancing interval */
sd->balance_interval = sd->min_interval;
@@ -7207,6 +8554,11 @@ out_one_pinned:
ld_moved = 0;
out:
+ trace_sched_load_balance(this_cpu, idle, *continue_balancing,
+ group ? group->cpumask[0] : 0,
+ busiest ? busiest->nr_running : 0,
+ env.imbalance, env.flags, ld_moved,
+ sd->balance_interval);
return ld_moved;
}
@@ -7302,9 +8654,12 @@ static int idle_balance(struct rq *this_rq)
/*
* Stop searching for tasks to pull if there are
- * now runnable tasks on this rq.
+ * now runnable tasks on the balance rq or if
+ * continue_balancing has been unset (only possible
+ * due to active migration).
*/
- if (pulled_task || this_rq->nr_running > 0)
+ if (pulled_task || this_rq->nr_running > 0 ||
+ !continue_balancing)
break;
}
rcu_read_unlock();
@@ -7351,8 +8706,23 @@ static int active_load_balance_cpu_stop(void *data)
int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
- struct sched_domain *sd;
+ struct sched_domain *sd = NULL;
struct task_struct *p = NULL;
+ struct task_struct *push_task;
+ int push_task_detached = 0;
+ struct lb_env env = {
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ .busiest_nr_running = 0,
+ .busiest_grp_capacity = 0,
+ .flags = 0,
+ .loop = 0,
+ };
+ bool moved = false;
raw_spin_lock_irq(&busiest_rq->lock);
@@ -7372,6 +8742,20 @@ static int active_load_balance_cpu_stop(void *data)
*/
BUG_ON(busiest_rq == target_rq);
+ push_task = busiest_rq->push_task;
+ target_cpu = busiest_rq->push_cpu;
+ if (push_task) {
+ if (task_on_rq_queued(push_task) &&
+ push_task->state == TASK_RUNNING &&
+ task_cpu(push_task) == busiest_cpu &&
+ cpu_online(target_cpu)) {
+ detach_task(push_task, &env);
+ push_task_detached = 1;
+ moved = true;
+ }
+ goto out_unlock;
+ }
+
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
@@ -7381,33 +8765,49 @@ static int active_load_balance_cpu_stop(void *data)
}
if (likely(sd)) {
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = target_cpu,
- .dst_rq = target_rq,
- .src_cpu = busiest_rq->cpu,
- .src_rq = busiest_rq,
- .idle = CPU_IDLE,
- };
-
+ env.sd = sd;
schedstat_inc(sd, alb_count);
p = detach_one_task(&env);
- if (p)
+ if (p) {
schedstat_inc(sd, alb_pushed);
- else
+ moved = true;
+ } else {
schedstat_inc(sd, alb_failed);
+ }
}
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
+ push_task = busiest_rq->push_task;
+ target_cpu = busiest_rq->push_cpu;
+
+ if (push_task)
+ busiest_rq->push_task = NULL;
+
raw_spin_unlock(&busiest_rq->lock);
+ if (push_task) {
+ if (push_task_detached)
+ attach_one_task(target_rq, push_task);
+ put_task_struct(push_task);
+ clear_reserved(target_cpu);
+ }
+
if (p)
attach_one_task(target_rq, p);
local_irq_enable();
+ if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
+ int check_groups = !!(env.flags &
+ LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+ check_for_freq_change(busiest_rq, false, check_groups);
+ check_for_freq_change(target_rq, false, check_groups);
+ } else if (moved) {
+ check_for_freq_change(target_rq, true, false);
+ }
+
return 0;
}
@@ -7429,9 +8829,50 @@ static struct {
unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned;
-static inline int find_new_ilb(void)
+#ifdef CONFIG_SCHED_HMP
+static inline int find_new_hmp_ilb(int type)
+{
+ int call_cpu = raw_smp_processor_id();
+ struct sched_domain *sd;
+ int ilb;
+
+ rcu_read_lock();
+
+ /* Pick an idle cpu "closest" to call_cpu */
+ for_each_domain(call_cpu, sd) {
+ for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+ sched_domain_span(sd)) {
+ if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
+ (hmp_capable() &&
+ cpu_max_possible_capacity(ilb) <=
+ cpu_max_possible_capacity(call_cpu)) ||
+ cpu_max_power_cost(ilb) <=
+ cpu_max_power_cost(call_cpu))) {
+ rcu_read_unlock();
+ reset_balance_interval(ilb);
+ return ilb;
+ }
+ }
+ }
+
+ rcu_read_unlock();
+ return nr_cpu_ids;
+}
+#else /* CONFIG_SCHED_HMP */
+static inline int find_new_hmp_ilb(int type)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_HMP */
+
+static inline int find_new_ilb(int type)
{
- int ilb = cpumask_first(nohz.idle_cpus_mask);
+ int ilb;
+
+ if (sched_enable_hmp)
+ return find_new_hmp_ilb(type);
+
+ ilb = cpumask_first(nohz.idle_cpus_mask);
if (ilb < nr_cpu_ids && idle_cpu(ilb))
return ilb;
@@ -7444,13 +8885,13 @@ static inline int find_new_ilb(void)
* nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
* CPU (if there is one).
*/
-static void nohz_balancer_kick(void)
+static void nohz_balancer_kick(int type)
{
int ilb_cpu;
nohz.next_balance++;
- ilb_cpu = find_new_ilb();
+ ilb_cpu = find_new_ilb(type);
if (ilb_cpu >= nr_cpu_ids)
return;
@@ -7735,6 +9176,70 @@ end:
clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}
+#ifdef CONFIG_SCHED_HMP
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+ struct sched_domain *sd;
+ int i;
+
+ if (rq->nr_running < 2)
+ return 0;
+
+ if (!sysctl_sched_restrict_cluster_spill || sched_boost())
+ return 1;
+
+ if (hmp_capable() && cpu_max_possible_capacity(cpu) ==
+ max_possible_capacity)
+ return 1;
+
+ rcu_read_lock();
+ sd = rcu_dereference_check_sched_domain(rq->sd);
+ if (!sd) {
+ rcu_read_unlock();
+ return 0;
+ }
+
+ for_each_cpu(i, sched_domain_span(sd)) {
+ if (cpu_load(i) < sched_spill_load &&
+ cpu_rq(i)->nr_running <
+ sysctl_sched_spill_nr_run) {
+			/*
+			 * Restrict the kick to CPUs of equal or lower
+			 * capacity.
+			 */
+ *type = NOHZ_KICK_RESTRICT;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return 1;
+}
+#else
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+ return 0;
+}
+#endif
+
+static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
+{
+ unsigned long now = jiffies;
+
+ /*
+ * None are in tickless mode and hence no need for NOHZ idle load
+ * balancing.
+ */
+ if (likely(!atomic_read(&nohz.nr_cpus)))
+ return 0;
+
+ if (sched_enable_hmp)
+ return _nohz_kick_needed_hmp(rq, cpu, type);
+
+ if (time_before(now, nohz.next_balance))
+ return 0;
+
+ return (rq->nr_running >= 2);
+}
+
/*
* Current heuristic for kicking the idle load balancer in the presence
* of an idle cpu in the system.
@@ -7746,12 +9251,14 @@ end:
* - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
* domain span are idle.
*/
-static inline bool nohz_kick_needed(struct rq *rq)
+static inline bool nohz_kick_needed(struct rq *rq, int *type)
{
- unsigned long now = jiffies;
+#ifndef CONFIG_SCHED_HMP
struct sched_domain *sd;
struct sched_group_capacity *sgc;
- int nr_busy, cpu = rq->cpu;
+ int nr_busy;
+#endif
+ int cpu = rq->cpu;
bool kick = false;
if (unlikely(rq->idle_balance))
@@ -7764,19 +9271,10 @@ static inline bool nohz_kick_needed(struct rq *rq)
set_cpu_sd_state_busy();
nohz_balance_exit_idle(cpu);
- /*
- * None are in tickless mode and hence no need for NOHZ idle load
- * balancing.
- */
- if (likely(!atomic_read(&nohz.nr_cpus)))
- return false;
-
- if (time_before(now, nohz.next_balance))
- return false;
-
- if (rq->nr_running >= 2)
+ if (_nohz_kick_needed(rq, cpu, type))
return true;
+#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
if (sd) {
@@ -7808,6 +9306,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
unlock:
rcu_read_unlock();
+#endif
return kick;
}
#else
@@ -7841,6 +9340,8 @@ static void run_rebalance_domains(struct softirq_action *h)
*/
void trigger_load_balance(struct rq *rq)
{
+ int type = NOHZ_KICK_ANY;
+
/* Don't need to rebalance while attached to NULL domain */
if (unlikely(on_null_domain(rq)))
return;
@@ -7848,8 +9349,8 @@ void trigger_load_balance(struct rq *rq)
if (time_after_eq(jiffies, rq->next_balance))
raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
- if (nohz_kick_needed(rq))
- nohz_balancer_kick();
+ if (nohz_kick_needed(rq, &type))
+ nohz_balancer_kick(type);
#endif
}
@@ -8309,6 +9810,11 @@ const struct sched_class fair_sched_class = {
#ifdef CONFIG_FAIR_GROUP_SCHED
.task_move_group = task_move_group_fair,
#endif
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_fair,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_fair,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa46c2f..acee1854c3d0 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -49,7 +49,7 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
* Queue remote wakeups on the target CPU and process them
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
-SCHED_FEAT(TTWU_QUEUE, true)
+SCHED_FEAT(TTWU_QUEUE, false)
#ifdef HAVE_RT_PUSH_IPI
/*
@@ -68,4 +68,3 @@ SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
SCHED_FEAT(ATTACH_AGE_LOAD, true)
-
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
new file mode 100644
index 000000000000..5002619961ce
--- /dev/null
+++ b/kernel/sched/hmp.c
@@ -0,0 +1,4019 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
+ * Syed Rameez Mustafa, Olav haugan, Joonwoo Park, Pavan Kumar Kondeti
+ * and Vikram Mulukutla
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/list_sort.h>
+#include <linux/syscore_ops.h>
+
+#include "sched.h"
+
+#include <trace/events/sched.h>
+
+const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
+ "TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
+ "IRQ_UPDATE"};
+
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
+ "RQ_TO_RQ", "GROUP_TO_GROUP"};
+
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
+
+static bool use_cycle_counter;
+static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
+
+u64 sched_ktime_clock(void)
+{
+ if (unlikely(sched_ktime_suspended))
+ return ktime_to_ns(ktime_last);
+ return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+ sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+ ktime_last = ktime_get();
+ sched_ktime_suspended = true;
+ return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+ .resume = sched_resume,
+ .suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+ register_syscore_ops(&sched_syscore_ops);
+ return 0;
+}
+late_initcall(sched_init_ops);
+
+inline void clear_ed_task(struct task_struct *p, struct rq *rq)
+{
+ if (p == rq->ed_task)
+ rq->ed_task = NULL;
+}
+
+inline void set_task_last_wake(struct task_struct *p, u64 wallclock)
+{
+ p->last_wake_ts = wallclock;
+}
+
+inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
+{
+ p->last_switch_out_ts = wallclock;
+}
+
+/*
+ * Note C-state for (idle) cpus.
+ *
+ * @cstate = cstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cpu
+ * @wakeup_latency = latency to wakeup from cstate
+ *
+ */
+void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->cstate = cstate; /* C1, C2 etc */
+ rq->wakeup_energy = wakeup_energy;
+ rq->wakeup_latency = wakeup_latency;
+}
+
+/*
+ * Note D-state for (idle) cluster.
+ *
+ * @dstate = dstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cluster
+ * @wakeup_latency = latency to wakeup from cluster
+ *
+ */
+void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+ int wakeup_energy, int wakeup_latency)
+{
+ struct sched_cluster *cluster =
+ cpu_rq(cpumask_first(cluster_cpus))->cluster;
+ cluster->dstate = dstate;
+ cluster->dstate_wakeup_energy = wakeup_energy;
+ cluster->dstate_wakeup_latency = wakeup_latency;
+}
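/*
 * Editorial sketch, not part of this patch: a platform cpuidle driver would
 * typically report idle-state transitions through the two hooks above. The
 * function names and the energy/latency numbers below are hypothetical.
 */
static void example_cpu_idle_enter(int cpu, int cstate_idx)
{
	/* cstate 0 means "active"; energy/latency values are illustrative */
	sched_set_cpu_cstate(cpu, cstate_idx, 100, 150);
}

static void example_cpu_idle_exit(int cpu)
{
	sched_set_cpu_cstate(cpu, 0, 0, 0);
}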
+
+u32 __weak get_freq_max_load(int cpu, u32 freq)
+{
+ /* 100% by default */
+ return 100;
+}
+
+struct freq_max_load_entry {
+ /* The maximum load which has accounted governor's headroom. */
+ u64 hdemand;
+};
+
+struct freq_max_load {
+ struct rcu_head rcu;
+ int length;
+ struct freq_max_load_entry freqs[0];
+};
+
+static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
+static DEFINE_SPINLOCK(freq_max_load_lock);
+
+struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
+{
+ return NULL;
+}
+
+int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+ int i, cpu, ret;
+ unsigned int freq;
+ struct cpu_pstate_pwr *costs;
+ struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+ struct freq_max_load *max_load, *old_max_load;
+ struct freq_max_load_entry *entry;
+ u64 max_demand_capacity, max_demand;
+ unsigned long flags;
+ u32 hfreq;
+ int hpct;
+
+ if (!per_cpu_info)
+ return 0;
+
+ spin_lock_irqsave(&freq_max_load_lock, flags);
+ max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
+ for_each_cpu(cpu, cpumask) {
+ if (!per_cpu_info[cpu].ptable) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+
+ /*
+		 * Allocate len + 1 entries and leave the last power cost as 0
+		 * so that power_cost() can stop iterating when
+		 * per_cpu_info[cpu].len grows beyond max_load->length due to a
+		 * race between a cpu power stats update and
+		 * get_cpu_pwr_stats().
+ */
+ max_load = kzalloc(sizeof(struct freq_max_load) +
+ sizeof(struct freq_max_load_entry) *
+ (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
+ if (unlikely(!max_load)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ max_load->length = per_cpu_info[cpu].len;
+
+ max_demand = max_demand_capacity *
+ cpu_max_possible_capacity(cpu);
+
+ i = 0;
+ costs = per_cpu_info[cpu].ptable;
+ while (costs[i].freq) {
+ entry = &max_load->freqs[i];
+ freq = costs[i].freq;
+ hpct = get_freq_max_load(cpu, freq);
+			if (hpct <= 0 || hpct > 100)
+ hpct = 100;
+ hfreq = div64_u64((u64)freq * hpct, 100);
+ entry->hdemand =
+ div64_u64(max_demand * hfreq,
+ cpu_max_possible_freq(cpu));
+ i++;
+ }
+
+ rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
+ if (old_max_load)
+ kfree_rcu(old_max_load, rcu);
+ }
+
+ spin_unlock_irqrestore(&freq_max_load_lock, flags);
+ return 0;
+
+fail:
+ for_each_cpu(cpu, cpumask) {
+ max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+ if (max_load) {
+ rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
+ kfree_rcu(max_load, rcu);
+ }
+ }
+
+ spin_unlock_irqrestore(&freq_max_load_lock, flags);
+ return ret;
+}
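/*
 * Editorial worked example (all values are illustrative, not from this
 * patch): with max_task_load() == 10000000 ns, max_possible_capacity == 1024
 * and cpu_max_possible_capacity(cpu) == 1024, max_demand is roughly 10000000.
 * Assuming get_freq_max_load() returns 90 (90% headroom) for an 1800000 kHz
 * OPP on a CPU whose cpu_max_possible_freq() is 2000000 kHz:
 *
 *   hfreq   = 1800000 * 90 / 100            = 1620000 kHz
 *   hdemand = 10000000 * 1620000 / 2000000 ~= 8100000 ns
 *
 * i.e. a task must keep the CPU busy for roughly 8.1ms of a 10ms window
 * before this OPP is considered insufficient (ignoring integer truncation).
 */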
+
+unsigned int max_possible_efficiency = 1;
+unsigned int min_possible_efficiency = UINT_MAX;
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+ return SCHED_LOAD_SCALE;
+}
+
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static void __update_min_max_capacity(void)
+{
+ int i;
+ int max_cap = 0, min_cap = INT_MAX;
+
+ for_each_online_cpu(i) {
+ max_cap = max(max_cap, cpu_capacity(i));
+ min_cap = min(min_cap, cpu_capacity(i));
+ }
+
+ max_capacity = max_cap;
+ min_capacity = min_cap;
+}
+
+static void update_min_max_capacity(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for_each_possible_cpu(i)
+ raw_spin_lock(&cpu_rq(i)->lock);
+
+ __update_min_max_capacity();
+
+ for_each_possible_cpu(i)
+ raw_spin_unlock(&cpu_rq(i)->lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long
+capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+ return (1024 * cluster->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
+{
+ return (1024 * cluster_max_freq(cluster)) / min_max_freq;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static inline unsigned long
+load_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+ return DIV_ROUND_UP(1024 * max_possible_efficiency,
+ cluster->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
+{
+ return DIV_ROUND_UP(1024 * max_possible_freq,
+ cluster_max_freq(cluster));
+}
+
+static int compute_capacity(struct sched_cluster *cluster)
+{
+ int capacity = 1024;
+
+ capacity *= capacity_scale_cpu_efficiency(cluster);
+ capacity >>= 10;
+
+ capacity *= capacity_scale_cpu_freq(cluster);
+ capacity >>= 10;
+
+ return capacity;
+}
+
+static int compute_max_possible_capacity(struct sched_cluster *cluster)
+{
+ int capacity = 1024;
+
+ capacity *= capacity_scale_cpu_efficiency(cluster);
+ capacity >>= 10;
+
+ capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
+ capacity >>= 10;
+
+ return capacity;
+}
+
+static int compute_load_scale_factor(struct sched_cluster *cluster)
+{
+ int load_scale = 1024;
+
+ /*
+	 * load_scale_factor accounts for the fact that task load is
+	 * expressed in reference to the "best" performing cpu. A task's
+	 * load needs to be scaled (up) by this factor to judge its
+	 * suitability for placement on a (little) cpu.
+ */
+ load_scale *= load_scale_cpu_efficiency(cluster);
+ load_scale >>= 10;
+
+ load_scale *= load_scale_cpu_freq(cluster);
+ load_scale >>= 10;
+
+ return load_scale;
+}
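/*
 * Editorial worked example (numbers are hypothetical): consider a little
 * cluster with efficiency == 1024 and cluster_max_freq == 1500000 kHz on a
 * system where min_possible_efficiency == 1024, max_possible_efficiency ==
 * 2048, min_max_freq == 1500000 kHz and max_possible_freq == 2000000 kHz.
 *
 *   capacity_scale_cpu_efficiency() = 1024 * 1024 / 1024             = 1024
 *   capacity_scale_cpu_freq()       = 1024 * 1500000 / 1500000       = 1024
 *   compute_capacity()              = 1024 (both scale steps are unity)
 *
 *   load_scale_cpu_efficiency() = DIV_ROUND_UP(1024 * 2048, 1024)       = 2048
 *   load_scale_cpu_freq()       = DIV_ROUND_UP(1024 * 2000000, 1500000) = 1366
 *   compute_load_scale_factor() = (1024 * 2048) >> 10  ->  2048, then
 *                                 (2048 * 1366) >> 10  ->  2732
 *
 * so a task's demand is inflated by roughly 2.7x when judging whether it
 * fits on this little cluster.
 */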
+
+struct list_head cluster_head;
+static DEFINE_MUTEX(cluster_lock);
+static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
+DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
+struct sched_cluster *sched_cluster[NR_CPUS];
+int num_clusters;
+
+struct sched_cluster init_cluster = {
+ .list = LIST_HEAD_INIT(init_cluster.list),
+ .id = 0,
+ .max_power_cost = 1,
+ .min_power_cost = 1,
+ .capacity = 1024,
+ .max_possible_capacity = 1024,
+ .efficiency = 1,
+ .load_scale_factor = 1024,
+ .cur_freq = 1,
+ .max_freq = 1,
+ .max_mitigated_freq = UINT_MAX,
+ .min_freq = 1,
+ .max_possible_freq = 1,
+ .dstate = 0,
+ .dstate_wakeup_energy = 0,
+ .dstate_wakeup_latency = 0,
+ .exec_scale_factor = 1024,
+ .notifier_sent = 0,
+};
+
+static void update_all_clusters_stats(void)
+{
+ struct sched_cluster *cluster;
+ u64 highest_mpc = 0, lowest_mpc = U64_MAX;
+
+ pre_big_task_count_change(cpu_possible_mask);
+
+ for_each_sched_cluster(cluster) {
+ u64 mpc;
+
+ cluster->capacity = compute_capacity(cluster);
+ mpc = cluster->max_possible_capacity =
+ compute_max_possible_capacity(cluster);
+ cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+ cluster->exec_scale_factor =
+ DIV_ROUND_UP(cluster->efficiency * 1024,
+ max_possible_efficiency);
+
+ if (mpc > highest_mpc)
+ highest_mpc = mpc;
+
+ if (mpc < lowest_mpc)
+ lowest_mpc = mpc;
+ }
+
+ max_possible_capacity = highest_mpc;
+ min_max_possible_capacity = lowest_mpc;
+
+ __update_min_max_capacity();
+ sched_update_freq_max_load(cpu_possible_mask);
+ post_big_task_count_change(cpu_possible_mask);
+}
+
+static void assign_cluster_ids(struct list_head *head)
+{
+ struct sched_cluster *cluster;
+ int pos = 0;
+
+ list_for_each_entry(cluster, head, list) {
+ cluster->id = pos;
+ sched_cluster[pos++] = cluster;
+ }
+}
+
+static void
+move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
+{
+ struct list_head *first, *last;
+
+ first = src->next;
+ last = src->prev;
+
+ if (sync_rcu) {
+ INIT_LIST_HEAD_RCU(src);
+ synchronize_rcu();
+ }
+
+ first->prev = dst;
+ dst->prev = last;
+ last->next = dst;
+
+ /* Ensure list sanity before making the head visible to all CPUs. */
+ smp_mb();
+ dst->next = first;
+}
+
+static int
+compare_clusters(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct sched_cluster *cluster1, *cluster2;
+ int ret;
+
+ cluster1 = container_of(a, struct sched_cluster, list);
+ cluster2 = container_of(b, struct sched_cluster, list);
+
+ ret = cluster1->max_power_cost > cluster2->max_power_cost ||
+ (cluster1->max_power_cost == cluster2->max_power_cost &&
+ cluster1->max_possible_capacity <
+ cluster2->max_possible_capacity);
+
+ return ret;
+}
+
+static void sort_clusters(void)
+{
+ struct sched_cluster *cluster;
+ struct list_head new_head;
+
+ INIT_LIST_HEAD(&new_head);
+
+ for_each_sched_cluster(cluster) {
+ cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
+ max_task_load());
+ cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
+ 0);
+ }
+
+ move_list(&new_head, &cluster_head, true);
+
+ list_sort(NULL, &new_head, compare_clusters);
+ assign_cluster_ids(&new_head);
+
+ /*
+ * Ensure cluster ids are visible to all CPUs before making
+ * cluster_head visible.
+ */
+ move_list(&cluster_head, &new_head, false);
+}
+
+static void
+insert_cluster(struct sched_cluster *cluster, struct list_head *head)
+{
+ struct sched_cluster *tmp;
+ struct list_head *iter = head;
+
+ list_for_each_entry(tmp, head, list) {
+ if (cluster->max_power_cost < tmp->max_power_cost)
+ break;
+ iter = &tmp->list;
+ }
+
+ list_add(&cluster->list, iter);
+}
+
+static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
+{
+ struct sched_cluster *cluster = NULL;
+
+ cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
+ if (!cluster) {
+		__WARN_printf("Cluster allocation failed. Possible bad scheduling\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&cluster->list);
+ cluster->max_power_cost = 1;
+ cluster->min_power_cost = 1;
+ cluster->capacity = 1024;
+ cluster->max_possible_capacity = 1024;
+ cluster->efficiency = 1;
+ cluster->load_scale_factor = 1024;
+ cluster->cur_freq = 1;
+ cluster->max_freq = 1;
+ cluster->max_mitigated_freq = UINT_MAX;
+ cluster->min_freq = 1;
+ cluster->max_possible_freq = 1;
+ cluster->dstate = 0;
+ cluster->dstate_wakeup_energy = 0;
+ cluster->dstate_wakeup_latency = 0;
+ cluster->freq_init_done = false;
+
+ cluster->cpus = *cpus;
+ cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
+
+ if (cluster->efficiency > max_possible_efficiency)
+ max_possible_efficiency = cluster->efficiency;
+ if (cluster->efficiency < min_possible_efficiency)
+ min_possible_efficiency = cluster->efficiency;
+
+ cluster->notifier_sent = 0;
+ return cluster;
+}
+
+static void add_cluster(const struct cpumask *cpus, struct list_head *head)
+{
+ struct sched_cluster *cluster = alloc_new_cluster(cpus);
+ int i;
+
+ if (!cluster)
+ return;
+
+ for_each_cpu(i, cpus)
+ cpu_rq(i)->cluster = cluster;
+
+ insert_cluster(cluster, head);
+ set_bit(num_clusters, all_cluster_ids);
+ num_clusters++;
+}
+
+void update_cluster_topology(void)
+{
+ struct cpumask cpus = *cpu_possible_mask;
+ const struct cpumask *cluster_cpus;
+ struct list_head new_head;
+ int i;
+
+ INIT_LIST_HEAD(&new_head);
+
+ for_each_cpu(i, &cpus) {
+ cluster_cpus = cpu_coregroup_mask(i);
+ cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+ cpumask_andnot(&cpus, &cpus, cluster_cpus);
+ add_cluster(cluster_cpus, &new_head);
+ }
+
+ assign_cluster_ids(&new_head);
+
+ /*
+ * Ensure cluster ids are visible to all CPUs before making
+ * cluster_head visible.
+ */
+ move_list(&cluster_head, &new_head, false);
+}
+
+void init_clusters(void)
+{
+ bitmap_clear(all_cluster_ids, 0, NR_CPUS);
+ init_cluster.cpus = *cpu_possible_mask;
+ INIT_LIST_HEAD(&cluster_head);
+}
+
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+ mutex_lock(&cluster_lock);
+ if (!cb->get_cpu_cycle_counter) {
+ mutex_unlock(&cluster_lock);
+ return -EINVAL;
+ }
+
+ cpu_cycle_counter_cb = *cb;
+ use_cycle_counter = true;
+ mutex_unlock(&cluster_lock);
+
+ return 0;
+}
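/*
 * Editorial usage sketch (driver-side names are hypothetical, and the
 * callback prototype is assumed to take a cpu number and return a 64-bit
 * cycle count): a platform driver that exposes a per-cpu cycle counter can
 * register it so task demand is derived from cycle counts rather than
 * sampled frequency.
 */
static u64 example_read_cpu_cycles(int cpu)
{
	return 0;	/* read the platform's per-cpu cycle counter here */
}

static struct cpu_cycle_counter_cb example_cycle_counter_cb = {
	.get_cpu_cycle_counter = example_read_cpu_cycles,
};

/* typically done from the platform driver's probe/init path: */
/* register_cpu_cycle_counter_cb(&example_cycle_counter_cb); */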
+
+int __init set_sched_enable_hmp(char *str)
+{
+ int enable_hmp = 0;
+
+ get_option(&str, &enable_hmp);
+
+ sched_enable_hmp = !!enable_hmp;
+
+ return 0;
+}
+
+early_param("sched_enable_hmp", set_sched_enable_hmp);
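/*
 * Editorial usage note: HMP-aware placement is enabled from the kernel
 * command line, e.g. "sched_enable_hmp=1". get_option() parses the value and
 * any non-zero integer turns it on.
 */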
+
+int got_boost_kick(void)
+{
+ int cpu = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+
+ return test_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+inline void clear_boost_kick(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ clear_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+inline void boost_kick(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+ smp_send_reschedule(cpu);
+}
+
+/* Clear any HMP scheduler related requests pending from or on cpu */
+void clear_hmp_request(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ clear_boost_kick(cpu);
+ clear_reserved(cpu);
+ if (rq->push_task) {
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (rq->push_task) {
+ clear_reserved(rq->push_cpu);
+ put_task_struct(rq->push_task);
+ rq->push_task = NULL;
+ }
+ rq->active_balance = 0;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+}
+
+int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->static_cpu_pwr_cost = cost;
+ return 0;
+}
+
+unsigned int sched_get_static_cpu_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->static_cpu_pwr_cost;
+}
+
+int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
+{
+ struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+
+ cluster->static_cluster_pwr_cost = cost;
+ return 0;
+}
+
+unsigned int sched_get_static_cluster_pwr_cost(int cpu)
+{
+ return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
+}
+
+/*
+ * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
+ * associated with them. This is required for atomic updates of those
+ * variables when they are modified via the sysctl interface.
+ *
+ * IMPORTANT: Initialize both copies to the same value!!
+ */
+
+/*
+ * Tasks that are runnable continuously for a period greater than
+ * EARLY_DETECTION_DURATION can be flagged early as potential
+ * high load tasks.
+ */
+#define EARLY_DETECTION_DURATION 9500000
+
+static __read_mostly unsigned int sched_ravg_hist_size = 5;
+__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
+
+static __read_mostly unsigned int sched_window_stats_policy =
+ WINDOW_STATS_MAX_RECENT_AVG;
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
+ WINDOW_STATS_MAX_RECENT_AVG;
+
+#define SCHED_ACCOUNT_WAIT_TIME 1
+
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+
+unsigned int __read_mostly sysctl_sched_enable_colocation = 1;
+
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * The children inherits the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+__read_mostly unsigned int sysctl_sched_new_task_windows = 5;
+
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
+
+/*
+ * For increase, send notification if
+ * freq_required - cur_freq > sysctl_sched_freq_inc_notify
+ */
+__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
+
+/*
+ * For decrease, send notification if
+ * cur_freq - freq_required > sysctl_sched_freq_dec_notify
+ */
+__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
+
+static __read_mostly unsigned int sched_io_is_busy;
+
+__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
+
+/*
+ * Maximum possible frequency across all cpus. Task demand and cpu
+ * capacity (cpu_power) metrics are scaled in reference to it.
+ */
+unsigned int max_possible_freq = 1;
+
+/*
+ * Minimum possible max_freq across all cpus. This will be same as
+ * max_possible_freq on homogeneous systems and could be different from
+ * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
+ * capacity (cpu_power) of cpus.
+ */
+unsigned int min_max_freq = 1;
+
+unsigned int max_capacity = 1024; /* max(rq->capacity) */
+unsigned int min_capacity = 1024; /* min(rq->capacity) */
+unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
+unsigned int
+min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
+
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = 10000000;
+
+/* Min window size (in ns) = 10ms */
+#define MIN_SCHED_RAVG_WINDOW 10000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
+
+/*
+ * Major task runtime. If a task runs for more than sched_major_task_runtime
+ * in a window, it is considered to be generating the majority of the
+ * workload for that window. Prediction could be adjusted for such tasks.
+ */
+__read_mostly unsigned int sched_major_task_runtime = 10000000;
+
+static unsigned int sync_cpu;
+
+static LIST_HEAD(related_thread_groups);
+static DEFINE_RWLOCK(related_thread_group_lock);
+
+#define for_each_related_thread_group(grp) \
+ list_for_each_entry(grp, &related_thread_groups, list)
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
+ * for frequency determination purpose. This aggregation is done per-cluster.
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ * Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ * One related thread group A that has tasks A0, A1, A2
+ *
+ * A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ * tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ * CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ * not belonging to group A are accumulated when they run on cpu X
+ *
+ * Let's say the stats for window M were as below:
+ *
+ * C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ * Task A0 ran 5ms on CPU0
+ * Task B0 ran 1ms on CPU0
+ *
+ * C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ * Task A1 ran 4ms on CPU1
+ * Task A2 ran 2ms on CPU1
+ * Task B1 ran 5ms on CPU1
+ *
+ * C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ * CPU2 idle
+ *
+ * C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ * CPU3 idle
+ *
+ * In this case, CPU1 was the most busy going by just its prev_sum counter.
+ * Demand from all group A tasks is added to CPU1. IOW, at the end of window
+ * M, the cpu busy time reported to the governor will be:
+ *
+ * C0 busy time = 1ms
+ * C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+static __read_mostly unsigned int sched_freq_aggregate;
+__read_mostly unsigned int sysctl_sched_freq_aggregate;
+
+unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
+static unsigned int __read_mostly sched_freq_aggregate_threshold;
+
+/* Initial task load. Newly created tasks are assigned this load. */
+unsigned int __read_mostly sched_init_task_load_windows;
+unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
+
+unsigned int max_task_load(void)
+{
+ return sched_ravg_window;
+}
+
+/* Use this knob to turn on or off HMP-aware task placement logic */
+unsigned int __read_mostly sched_enable_hmp;
+
+/*
+ * Scheduler boost is a mechanism to temporarily place tasks on CPUs
+ * with higher capacity than those where they would normally have ended
+ * up based on their load characteristics. Any entity enabling boost is
+ * responsible for disabling it as well.
+ */
+unsigned int sysctl_sched_boost;
+
+/* A cpu can no longer accommodate more tasks if:
+ *
+ * rq->nr_running > sysctl_sched_spill_nr_run ||
+ * rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
+ */
+unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
+
+/*
+ * Place sync wakee tasks that have less than the configured demand on the
+ * waker's cluster.
+ */
+unsigned int __read_mostly sched_small_wakee_task_load;
+unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
+
+unsigned int __read_mostly sched_big_waker_task_load;
+unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
+
+/*
+ * CPUs with load greater than the sched_spill_load_threshold are not
+ * eligible for task placement. When all CPUs in a cluster achieve a
+ * load higher than this level, tasks become eligible for inter
+ * cluster migration.
+ */
+unsigned int __read_mostly sched_spill_load;
+unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
+
+/*
+ * Tasks whose bandwidth consumption on a cpu is more than
+ * sched_upmigrate are considered "big" tasks. Big tasks will be
+ * considered for "up" migration, i.e migrating to a cpu with better
+ * capacity.
+ */
+unsigned int __read_mostly sched_upmigrate;
+unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
+
+/*
+ * Big tasks, once migrated, will need to drop their bandwidth
+ * consumption to less than sched_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_downmigrate;
+unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
+
+/*
+ * The load scale factor of a CPU gets boosted when its max frequency
+ * is restricted, which makes tasks migrate to higher capacity CPUs
+ * early. To compensate, the sched_upmigrate threshold is auto-scaled by
+ * rq->max_possible_freq/rq->max_freq of the lower capacity CPU.
+ */
+unsigned int up_down_migrate_scale_factor = 1024;
+
+/*
+ * The scheduler selects and places a task on its previous CPU if its sleep
+ * time is less than sysctl_sched_select_prev_cpu_us.
+ */
+unsigned int __read_mostly
+sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
+
+unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
+
+unsigned int __read_mostly
+sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
+
+unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
+
+void update_up_down_migrate(void)
+{
+ unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
+ unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
+ unsigned int delta;
+
+ if (up_down_migrate_scale_factor == 1024)
+ goto done;
+
+ delta = up_migrate - down_migrate;
+
+ up_migrate /= NSEC_PER_USEC;
+ up_migrate *= up_down_migrate_scale_factor;
+ up_migrate >>= 10;
+ up_migrate *= NSEC_PER_USEC;
+
+ up_migrate = min(up_migrate, sched_ravg_window);
+
+ down_migrate /= NSEC_PER_USEC;
+ down_migrate *= up_down_migrate_scale_factor;
+ down_migrate >>= 10;
+ down_migrate *= NSEC_PER_USEC;
+
+ down_migrate = min(down_migrate, up_migrate - delta);
+done:
+ sched_upmigrate = up_migrate;
+ sched_downmigrate = down_migrate;
+}
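+
+/*
+ * Illustrative example (numbers assumed, not taken from this patch): with
+ * sysctl_sched_upmigrate_pct = 80, sysctl_sched_downmigrate_pct = 60 and
+ * up_down_migrate_scale_factor = 2048, the effective sched_upmigrate is
+ * roughly doubled (but capped at sched_ravg_window), while sched_downmigrate
+ * is clamped so that the up/down hysteresis gap never shrinks below its
+ * unscaled value.
+ */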
+
+void set_hmp_defaults(void)
+{
+ sched_spill_load =
+ pct_to_real(sysctl_sched_spill_load_pct);
+
+ update_up_down_migrate();
+
+ sched_major_task_runtime =
+ mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
+
+ sched_init_task_load_windows =
+ div64_u64((u64)sysctl_sched_init_task_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
+ NSEC_PER_USEC;
+
+ sched_small_wakee_task_load =
+ div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ sched_big_waker_task_load =
+ div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ sched_freq_aggregate_threshold =
+ pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+}
+
+u32 sched_get_init_task_load(struct task_struct *p)
+{
+ return p->init_load_pct;
+}
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
+{
+ if (init_load_pct < 0 || init_load_pct > 100)
+ return -EINVAL;
+
+ p->init_load_pct = init_load_pct;
+
+ return 0;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+
+int upmigrate_discouraged(struct task_struct *p)
+{
+ return task_group(p)->upmigrate_discouraged;
+}
+
+#else
+
+static inline int upmigrate_discouraged(struct task_struct *p)
+{
+ return 0;
+}
+
+#endif
+
+/* Is a task "big" on its current cpu */
+static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
+{
+ int nice = task_nice(p);
+
+ if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
+ return 0;
+
+ return scaled_load > sched_upmigrate;
+}
+
+int is_big_task(struct task_struct *p)
+{
+ return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
+}
+
+u64 cpu_load(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
+}
+
+u64 cpu_load_sync(int cpu, int sync)
+{
+ return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
+}
+
+static int boost_refcount;
+static DEFINE_SPINLOCK(boost_lock);
+static DEFINE_MUTEX(boost_mutex);
+
+static void boost_kick_cpus(void)
+{
+ int i;
+
+ for_each_online_cpu(i) {
+ if (cpu_capacity(i) != max_capacity)
+ boost_kick(i);
+ }
+}
+
+int sched_boost(void)
+{
+ return boost_refcount > 0;
+}
+
+int sched_set_boost(int enable)
+{
+ unsigned long flags;
+ int ret = 0;
+ int old_refcount;
+
+ if (!sched_enable_hmp)
+ return -EINVAL;
+
+ spin_lock_irqsave(&boost_lock, flags);
+
+ old_refcount = boost_refcount;
+
+ if (enable == 1) {
+ boost_refcount++;
+ } else if (!enable) {
+ if (boost_refcount >= 1)
+ boost_refcount--;
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
+ }
+
+ if (!old_refcount && boost_refcount)
+ boost_kick_cpus();
+
+ trace_sched_set_boost(boost_refcount);
+ spin_unlock_irqrestore(&boost_lock, flags);
+
+ return ret;
+}
+
+int sched_boost_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+
+ mutex_lock(&boost_mutex);
+ if (!write)
+ sysctl_sched_boost = sched_boost();
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ goto done;
+
+ ret = (sysctl_sched_boost <= 1) ?
+ sched_set_boost(sysctl_sched_boost) : -EINVAL;
+
+done:
+ mutex_unlock(&boost_mutex);
+ return ret;
+}
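+
+/*
+ * Usage sketch (assuming the ctl_table entry in kernel/sysctl.c exposes this
+ * handler as "sched_boost"):
+ *
+ *   echo 1 > /proc/sys/kernel/sched_boost   # take a boost reference and
+ *                                           # kick CPUs below max capacity
+ *   echo 0 > /proc/sys/kernel/sched_boost   # drop the boost reference
+ */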
+
+/*
+ * A task will fit on a cpu if its bandwidth consumption on that cpu
+ * will be less than sched_upmigrate. A big task that was previously
+ * "up" migrated will be considered fitting on a "little" cpu if its
+ * bandwidth consumption on the "little" cpu will be less than
+ * sched_downmigrate. This helps avoid frequent migrations for
+ * tasks with load close to the upmigrate threshold.
+ */
+int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+ enum sched_boost_type boost_type)
+{
+ int upmigrate;
+
+ if (cpu_capacity(cpu) == max_capacity)
+ return 1;
+
+ if (boost_type != SCHED_BOOST_ON_BIG) {
+ if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
+ upmigrate_discouraged(p))
+ return 1;
+
+ upmigrate = sched_upmigrate;
+ if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
+ upmigrate = sched_downmigrate;
+
+ if (task_load < upmigrate)
+ return 1;
+ }
+
+ return 0;
+}
+
+enum sched_boost_type sched_boost_type(void)
+{
+ if (sched_boost()) {
+ if (min_possible_efficiency != max_possible_efficiency)
+ return SCHED_BOOST_ON_BIG;
+ else
+ return SCHED_BOOST_ON_ALL;
+ }
+ return SCHED_BOOST_NONE;
+}
+
+int task_will_fit(struct task_struct *p, int cpu)
+{
+ u64 tload = scale_load_to_cpu(task_load(p), cpu);
+
+ return task_load_will_fit(p, tload, cpu, sched_boost_type());
+}
+
+int group_will_fit(struct sched_cluster *cluster,
+ struct related_thread_group *grp, u64 demand)
+{
+ int cpu = cluster_first_cpu(cluster);
+ int prev_capacity = 0;
+ unsigned int threshold = sched_upmigrate;
+ u64 load;
+
+ if (cluster->capacity == max_capacity)
+ return 1;
+
+ if (grp->preferred_cluster)
+ prev_capacity = grp->preferred_cluster->capacity;
+
+ if (cluster->capacity < prev_capacity)
+ threshold = sched_downmigrate;
+
+ load = scale_load_to_cpu(demand, cpu);
+ if (load < threshold)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Return the cost of running task p on CPU cpu. This function
+ * currently assumes that task p is the only task which will run on
+ * the CPU.
+ */
+unsigned int power_cost(int cpu, u64 demand)
+{
+ int first, mid, last;
+ struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+ struct cpu_pstate_pwr *costs;
+ struct freq_max_load *max_load;
+ int total_static_pwr_cost = 0;
+ struct rq *rq = cpu_rq(cpu);
+ unsigned int pc;
+
+ if (!per_cpu_info || !per_cpu_info[cpu].ptable)
+ /*
+ * When power aware scheduling is not in use, or CPU
+ * power data is not available, just use the CPU
+ * capacity as a rough stand-in for real CPU power
+ * numbers, assuming bigger CPUs are more power
+ * hungry.
+ */
+ return cpu_max_possible_capacity(cpu);
+
+ rcu_read_lock();
+ max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+ if (!max_load) {
+ pc = cpu_max_possible_capacity(cpu);
+ goto unlock;
+ }
+
+ costs = per_cpu_info[cpu].ptable;
+
+ if (demand <= max_load->freqs[0].hdemand) {
+ pc = costs[0].power;
+ goto unlock;
+ } else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
+ pc = costs[max_load->length - 1].power;
+ goto unlock;
+ }
+
+ first = 0;
+ last = max_load->length - 1;
+ mid = (last - first) >> 1;
+ while (1) {
+ if (demand <= max_load->freqs[mid].hdemand)
+ last = mid;
+ else
+ first = mid;
+
+ if (last - first == 1)
+ break;
+ mid = first + ((last - first) >> 1);
+ }
+
+ pc = costs[last].power;
+
+unlock:
+ rcu_read_unlock();
+
+ if (idle_cpu(cpu) && rq->cstate) {
+ total_static_pwr_cost += rq->static_cpu_pwr_cost;
+ if (rq->cluster->dstate)
+ total_static_pwr_cost +=
+ rq->cluster->static_cluster_pwr_cost;
+ }
+
+ return pc + total_static_pwr_cost;
+
+}
+
+void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ if (is_big_task(p))
+ stats->nr_big_tasks++;
+}
+
+void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ if (is_big_task(p))
+ stats->nr_big_tasks--;
+
+ BUG_ON(stats->nr_big_tasks < 0);
+}
+
+void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+ inc_nr_big_task(&rq->hmp_stats, p);
+ if (change_cra)
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+ dec_nr_big_task(&rq->hmp_stats, p);
+ if (change_cra)
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+{
+ stats->nr_big_tasks = 0;
+ if (reset_cra) {
+ stats->cumulative_runnable_avg = 0;
+ stats->pred_demands_sum = 0;
+ }
+}
+
+/*
+ * Invoked from three places:
+ * 1) try_to_wake_up() -> ... -> select_best_cpu()
+ * 2) scheduler_tick() -> ... -> migration_needed() -> select_best_cpu()
+ * 3) can_migrate_task()
+ *
+ * It's safe to dereference p->grp in the first case (since p->pi_lock is
+ * held) but not in the other cases. p->grp is hence freed after an RCU grace
+ * period and accessed under rcu_read_lock().
+ */
+int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+ struct related_thread_group *grp;
+ int rc = 0;
+
+ rcu_read_lock();
+
+ grp = task_related_thread_group(p);
+ if (!grp || !sysctl_sched_enable_colocation)
+ rc = 1;
+ else
+ rc = (grp->preferred_cluster == cluster);
+
+ rcu_read_unlock();
+ return rc;
+}
+
+struct sched_cluster *rq_cluster(struct rq *rq)
+{
+ return rq->cluster;
+}
+
+/*
+ * reset_cpu_hmp_stats - reset HMP stats for a cpu
+ * nr_big_tasks
+ * cumulative_runnable_avg (iff reset_cra is true)
+ */
+void reset_cpu_hmp_stats(int cpu, int reset_cra)
+{
+ reset_cfs_rq_hmp_stats(cpu, reset_cra);
+ reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
+}
+
+void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 delta)
+{
+ u64 new_task_load;
+ u64 old_task_load;
+
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
+ new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
+
+ if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
+ stats->nr_big_tasks--;
+ else if (!__is_big_task(p, old_task_load) &&
+ __is_big_task(p, new_task_load))
+ stats->nr_big_tasks++;
+
+ BUG_ON(stats->nr_big_tasks < 0);
+}
+
+/*
+ * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
+ */
+static void update_nr_big_tasks(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *p;
+
+ /* Do not reset cumulative_runnable_avg */
+ reset_cpu_hmp_stats(cpu, 0);
+
+ list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
+ _inc_hmp_sched_stats_fair(rq, p, 0);
+}
+
+/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
+void pre_big_task_count_change(const struct cpumask *cpus)
+{
+ int i;
+
+ local_irq_disable();
+
+ for_each_cpu(i, cpus)
+ raw_spin_lock(&cpu_rq(i)->lock);
+}
+
+/*
+ * Reinitialize 'nr_big_tasks' counters on all affected cpus
+ */
+void post_big_task_count_change(const struct cpumask *cpus)
+{
+ int i;
+
+ /* Assumes local_irq_disable() keeps online cpumap stable */
+ for_each_cpu(i, cpus)
+ update_nr_big_tasks(i);
+
+ for_each_cpu(i, cpus)
+ raw_spin_unlock(&cpu_rq(i)->lock);
+
+ local_irq_enable();
+}
+
+DEFINE_MUTEX(policy_mutex);
+
+static inline int invalid_value_freq_input(unsigned int *data)
+{
+ if (data == &sysctl_sched_freq_aggregate)
+ return !(*data == 0 || *data == 1);
+
+ return 0;
+}
+
+static inline int invalid_value(unsigned int *data)
+{
+ unsigned int val = *data;
+
+ if (data == &sysctl_sched_ravg_hist_size)
+ return (val < 2 || val > RAVG_HIST_SIZE_MAX);
+
+ if (data == &sysctl_sched_window_stats_policy)
+ return val >= WINDOW_STATS_INVALID_POLICY;
+
+ return invalid_value_freq_input(data);
+}
+
+/*
+ * Handle "atomic" update of sysctl_sched_window_stats_policy,
+ * sysctl_sched_ravg_hist_size and sched_freq_legacy_mode variables.
+ */
+int sched_window_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int *data = (unsigned int *)table->data;
+ unsigned int old_val;
+
+ if (!sched_enable_hmp)
+ return -EINVAL;
+
+ mutex_lock(&policy_mutex);
+
+ old_val = *data;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (ret || !write || (write && (old_val == *data)))
+ goto done;
+
+ if (invalid_value(data)) {
+ *data = old_val;
+ ret = -EINVAL;
+ goto done;
+ }
+
+ reset_all_window_stats(0, 0);
+
+done:
+ mutex_unlock(&policy_mutex);
+
+ return ret;
+}
+
+/*
+ * Convert percentage values into absolute form. This avoids a div() operation
+ * in the fast path that would otherwise be needed to convert task load to a
+ * percentage scale.
+ */
+int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int old_val;
+ unsigned int *data = (unsigned int *)table->data;
+ int update_min_nice = 0;
+
+ mutex_lock(&policy_mutex);
+
+ old_val = *data;
+
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (ret || !write || !sched_enable_hmp)
+ goto done;
+
+ if (write && (old_val == *data))
+ goto done;
+
+ /*
+ * Special handling for sched_freq_aggregate_threshold_pct
+ * which can be greater than 100. Use 1000 as an upper bound
+ * value which works for all practical use cases.
+ */
+ if (data == &sysctl_sched_freq_aggregate_threshold_pct) {
+ if (*data > 1000) {
+ *data = old_val;
+ ret = -EINVAL;
+ goto done;
+ }
+ } else if (data != &sysctl_sched_select_prev_cpu_us) {
+ /*
+ * all tunables other than sched_select_prev_cpu_us are
+ * in percentage.
+ */
+ if (sysctl_sched_downmigrate_pct >
+ sysctl_sched_upmigrate_pct || *data > 100) {
+ *data = old_val;
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+ /*
+ * Big task tunable change will need to re-classify tasks on
+ * runqueue as big and set their counters appropriately.
+	 * The sysctl interface affects secondary variables (*_pct), which are then
+	 * "atomically" carried over to the primary variables. The atomic change
+	 * includes taking the runqueue lock of all online cpus and re-initializing
+	 * their big-task counters based on the changed criteria.
+ */
+ if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+ get_online_cpus();
+ pre_big_task_count_change(cpu_online_mask);
+ }
+
+ set_hmp_defaults();
+
+ if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+ post_big_task_count_change(cpu_online_mask);
+ put_online_cpus();
+ }
+
+done:
+ mutex_unlock(&policy_mutex);
+ return ret;
+}
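+
+/*
+ * Usage sketch (exact /proc names depend on the ctl_table entries in
+ * kernel/sysctl.c): e.g. writing 90 to the sched_upmigrate_pct tunable
+ * recomputes sched_upmigrate via set_hmp_defaults() and, with all online
+ * runqueue locks held, re-classifies queued tasks as big or not.
+ */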
+
+inline int nr_big_tasks(struct rq *rq)
+{
+ return rq->hmp_stats.nr_big_tasks;
+}
+
+unsigned int cpu_temp(int cpu)
+{
+ struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+
+ if (per_cpu_info)
+ return per_cpu_info[cpu].temp;
+ else
+ return 0;
+}
+
+void init_new_task_load(struct task_struct *p)
+{
+ int i;
+ u32 init_load_windows = sched_init_task_load_windows;
+ u32 init_load_pct = current->init_load_pct;
+
+ p->init_load_pct = 0;
+ rcu_assign_pointer(p->grp, NULL);
+ INIT_LIST_HEAD(&p->grp_list);
+ memset(&p->ravg, 0, sizeof(struct ravg));
+ p->cpu_cycles = 0;
+
+ if (init_load_pct)
+ init_load_windows = div64_u64((u64)init_load_pct *
+ (u64)sched_ravg_window, 100);
+
+ p->ravg.demand = init_load_windows;
+ p->ravg.pred_demand = 0;
+ for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+ p->ravg.sum_history[i] = init_load_windows;
+}
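+
+/*
+ * Example (illustrative): with the default sysctl_sched_init_task_load_pct
+ * of 15 and a 10ms sched_ravg_window, a newly forked task starts out with a
+ * demand of ~1.5ms, unless the parent configured a per-task percentage via
+ * sched_set_init_task_load().
+ */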
+
+/* Return task demand in percentage scale */
+unsigned int pct_task_load(struct task_struct *p)
+{
+ unsigned int load;
+
+ load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
+
+ return load;
+}
+
+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * nr_running for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ int nr_big = rq->hmp_stats.nr_big_tasks;
+ int nr = rq->nr_running;
+
+ if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+ return nr_big;
+
+ return nr;
+}
+
+static inline int exiting_task(struct task_struct *p)
+{
+ return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+}
+
+static int __init set_sched_ravg_window(char *str)
+{
+ unsigned int window_size;
+
+ get_option(&str, &window_size);
+
+ if (window_size < MIN_SCHED_RAVG_WINDOW ||
+ window_size > MAX_SCHED_RAVG_WINDOW) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ sched_ravg_window = window_size;
+ return 0;
+}
+
+early_param("sched_ravg_window", set_sched_ravg_window);
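+
+/*
+ * Boot-time override example: passing "sched_ravg_window=20000000" on the
+ * kernel command line selects 20ms windows; values outside the
+ * MIN/MAX_SCHED_RAVG_WINDOW range are rejected with a warning.
+ */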
+
+static inline void
+update_window_start(struct rq *rq, u64 wallclock)
+{
+ s64 delta;
+ int nr_windows;
+
+ delta = wallclock - rq->window_start;
+ BUG_ON(delta < 0);
+ if (delta < sched_ravg_window)
+ return;
+
+ nr_windows = div64_u64(delta, sched_ravg_window);
+ rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+}
+
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+ u32 freq;
+
+ freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+ delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
+ delta *= rq->cluster->exec_scale_factor;
+ delta >>= 10;
+
+ return delta;
+}
+
+static inline int cpu_is_waiting_on_io(struct rq *rq)
+{
+ if (!sched_io_is_busy)
+ return 0;
+
+ return atomic_read(&rq->nr_iowait);
+}
+
+/* Does freq_required sufficiently exceed or fall behind cur_freq? */
+static inline int
+nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
+{
+ int delta = freq_required - cur_freq;
+
+ if (freq_required > cur_freq)
+ return delta < sysctl_sched_freq_inc_notify;
+
+ delta = -delta;
+
+ return delta < sysctl_sched_freq_dec_notify;
+}
+
+/* Convert busy time to frequency equivalent */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+ unsigned int freq;
+
+ load = scale_load_to_cpu(load, cpu_of(rq));
+ load *= 128;
+ load = div64_u64(load, max_task_load());
+
+ freq = load * cpu_max_possible_freq(cpu_of(rq));
+ freq /= 128;
+
+ return freq;
+}
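+
+/*
+ * Example (illustrative): a load equal to max_task_load(), i.e. a full
+ * window of busy time, maps to cpu_max_possible_freq(cpu); half a window
+ * of busy time maps to roughly half of that frequency.
+ */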
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu);
+
+/*
+ * Return load from all related groups on the given cpu.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void _group_load_in_cpu(int cpu, u64 *grp_load, u64 *new_grp_load)
+{
+ struct related_thread_group *grp;
+
+ for_each_related_thread_group(grp) {
+ struct group_cpu_time *cpu_time;
+
+ cpu_time = _group_cpu_time(grp, cpu);
+ *grp_load += cpu_time->prev_runnable_sum;
+ if (new_grp_load)
+ *new_grp_load += cpu_time->nt_prev_runnable_sum;
+ }
+}
+
+/*
+ * Return load from all related groups in given frequency domain.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void group_load_in_freq_domain(struct cpumask *cpus,
+ u64 *grp_load, u64 *new_grp_load)
+{
+ struct related_thread_group *grp;
+ int j;
+
+ for_each_related_thread_group(grp) {
+ for_each_cpu(j, cpus) {
+ struct group_cpu_time *cpu_time;
+
+ cpu_time = _group_cpu_time(grp, j);
+ *grp_load += cpu_time->prev_runnable_sum;
+ *new_grp_load += cpu_time->nt_prev_runnable_sum;
+ }
+ }
+}
+
+/*
+ * Should scheduler alert governor for changing frequency?
+ *
+ * @check_pred - evaluate frequency based on the predictive demand
+ * @check_groups - add load from all related groups on given cpu
+ *
+ * check_groups is set to 1 if a "related" task movement/wakeup is triggering
+ * the notification check. To avoid "re-aggregation" of demand in such cases,
+ * we check whether the migrated/woken task's demand (along with demand from
+ * existing tasks on the cpu) can be met on the target cpu.
+ *
+ */
+
+static int send_notification(struct rq *rq, int check_pred, int check_groups)
+{
+ unsigned int cur_freq, freq_required;
+ unsigned long flags;
+ int rc = 0;
+ u64 group_load = 0, new_load = 0;
+
+ if (!sched_enable_hmp)
+ return 0;
+
+ if (check_pred) {
+ u64 prev = rq->old_busy_time;
+ u64 predicted = rq->hmp_stats.pred_demands_sum;
+
+ if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
+ return 0;
+
+ prev = max(prev, rq->old_estimated_time);
+ if (prev > predicted)
+ return 0;
+
+ cur_freq = load_to_freq(rq, prev);
+ freq_required = load_to_freq(rq, predicted);
+
+ if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
+ return 0;
+ } else {
+ read_lock(&related_thread_group_lock);
+ /*
+ * Protect from concurrent update of rq->prev_runnable_sum and
+ * group cpu load
+ */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (check_groups)
+ _group_load_in_cpu(cpu_of(rq), &group_load, NULL);
+
+ new_load = rq->prev_runnable_sum + group_load;
+
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ read_unlock(&related_thread_group_lock);
+
+ cur_freq = load_to_freq(rq, rq->old_busy_time);
+ freq_required = load_to_freq(rq, new_load);
+
+ if (nearly_same_freq(cur_freq, freq_required))
+ return 0;
+ }
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->cluster->notifier_sent) {
+ rq->cluster->notifier_sent = 1;
+ rc = 1;
+ trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
+ new_load);
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return rc;
+}
+
+/* Alert governor if there is a need to change frequency */
+void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
+{
+ int cpu = cpu_of(rq);
+
+ if (!send_notification(rq, check_pred, check_groups))
+ return;
+
+ atomic_notifier_call_chain(
+ &load_alert_notifier_head, 0,
+ (void *)(long)cpu);
+}
+
+void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
+ struct task_struct *p)
+{
+ bool check_groups;
+
+ rcu_read_lock();
+ check_groups = task_in_related_thread_group(p);
+ rcu_read_unlock();
+
+ if (!same_freq_domain(src_cpu, dest_cpu)) {
+ if (!src_cpu_dead)
+ check_for_freq_change(cpu_rq(src_cpu), false,
+ check_groups);
+ check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
+ } else {
+ check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
+ }
+}
+
+static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+ u64 irqtime, int event)
+{
+ if (is_idle_task(p)) {
+ /* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
+ if (event == PICK_NEXT_TASK)
+ return 0;
+
+ /* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
+ return irqtime || cpu_is_waiting_on_io(rq);
+ }
+
+ if (event == TASK_WAKE)
+ return 0;
+
+ if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
+ return 1;
+
+ /*
+	 * TASK_UPDATE can be called on a sleeping task, when it's moved between
+	 * related groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+ }
+
+ /* TASK_MIGRATE, PICK_NEXT_TASK left */
+ return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+}
+
+static inline bool is_new_task(struct task_struct *p)
+{
+ return p->ravg.active_windows < sysctl_sched_new_task_windows;
+}
+
+#define INC_STEP 8
+#define DEC_STEP 2
+#define CONSISTENT_THRES 16
+#define INC_STEP_BIG 16
+/*
+ * bucket_increase - update the count of all buckets
+ *
+ * @buckets: array of buckets tracking busy time of a task
+ * @idx: the index of bucket to be incremented
+ *
+ * Each time a complete window finishes, count of bucket that runtime
+ * falls in (@idx) is incremented. Counts of all other buckets are
+ * decayed. The rate of increase and decay could be different based
+ * on current count in the bucket.
+ */
+static inline void bucket_increase(u8 *buckets, int idx)
+{
+ int i, step;
+
+ for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
+ if (idx != i) {
+ if (buckets[i] > DEC_STEP)
+ buckets[i] -= DEC_STEP;
+ else
+ buckets[i] = 0;
+ } else {
+ step = buckets[i] >= CONSISTENT_THRES ?
+ INC_STEP_BIG : INC_STEP;
+ if (buckets[i] > U8_MAX - step)
+ buckets[i] = U8_MAX;
+ else
+ buckets[i] += step;
+ }
+ }
+}
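+
+/*
+ * Example (illustrative counts): if bucket 3 is hit while already holding a
+ * count of 20 (>= CONSISTENT_THRES), it grows by INC_STEP_BIG (saturating at
+ * U8_MAX), while every other bucket decays by DEC_STEP towards zero.
+ */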
+
+static inline int busy_to_bucket(u32 normalized_rt)
+{
+ int bidx;
+
+ bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
+ bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
+
+ /*
+	 * Combine the lowest two buckets. The lowest frequency falls into
+	 * the 2nd bucket anyway, so continuing to predict the lowest bucket
+	 * is not useful.
+ */
+ if (!bidx)
+ bidx++;
+
+ return bidx;
+}
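+
+/*
+ * Example (illustrative, assuming NUM_BUSY_BUCKETS == 10): a normalized
+ * runtime of 25% of max_task_load() lands in bucket 2, while anything in
+ * the bottom tenth is promoted from bucket 0 to bucket 1.
+ */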
+
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+ return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
+
+#define HEAVY_TASK_SKIP 2
+#define HEAVY_TASK_SKIP_LIMIT 4
+/*
+ * get_pred_busy - calculate predicted demand for a task on runqueue
+ *
+ * @rq: runqueue of task p
+ * @p: task whose prediction is being updated
+ * @start: starting bucket. returned prediction should not be lower than
+ * this bucket.
+ * @runtime: runtime of the task. returned prediction should not be lower
+ * than this runtime.
+ * Note: @start can be derived from @runtime. It's passed in only to
+ * avoid duplicated calculation in some cases.
+ *
+ * A new predicted busy time is returned for task @p based on @runtime
+ * passed in. The function searches through buckets that represent busy
+ * time equal to or bigger than @runtime and attempts to find the bucket
+ * to use for prediction. Once found, it searches through historical busy
+ * time and returns the latest that falls into the bucket. If no such busy
+ * time exists, it returns the midpoint of that bucket.
+ */
+static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+ int start, u32 runtime)
+{
+ int i;
+ u8 *buckets = p->ravg.busy_buckets;
+ u32 *hist = p->ravg.sum_history;
+ u32 dmin, dmax;
+ u64 cur_freq_runtime = 0;
+ int first = NUM_BUSY_BUCKETS, final, skip_to;
+ u32 ret = runtime;
+
+ /* skip prediction for new tasks due to lack of history */
+ if (unlikely(is_new_task(p)))
+ goto out;
+
+ /* find minimal bucket index to pick */
+ for (i = start; i < NUM_BUSY_BUCKETS; i++) {
+ if (buckets[i]) {
+ first = i;
+ break;
+ }
+ }
+ /* if no higher buckets are filled, predict runtime */
+ if (first >= NUM_BUSY_BUCKETS)
+ goto out;
+
+ /* compute the bucket for prediction */
+ final = first;
+ if (first < HEAVY_TASK_SKIP_LIMIT) {
+ /* compute runtime at current CPU frequency */
+ cur_freq_runtime = mult_frac(runtime, max_possible_efficiency,
+ rq->cluster->efficiency);
+ cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
+ max_possible_freq, rq->cluster->cur_freq);
+ /*
+ * if the task runs for majority of the window, try to
+ * pick higher buckets.
+ */
+ if (cur_freq_runtime >= sched_major_task_runtime) {
+ int next = NUM_BUSY_BUCKETS;
+ /*
+ * if there is a higher bucket that's consistently
+ * hit, don't jump beyond that.
+ */
+ for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT &&
+ i < NUM_BUSY_BUCKETS; i++) {
+ if (buckets[i] > CONSISTENT_THRES) {
+ next = i;
+ break;
+ }
+ }
+ skip_to = min(next, start + HEAVY_TASK_SKIP);
+ /* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
+ skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
+ /* don't go below first non-empty bucket, if any */
+ final = max(first, skip_to);
+ }
+ }
+
+ /* determine demand range for the predicted bucket */
+ if (final < 2) {
+ /* lowest two buckets are combined */
+ dmin = 0;
+ final = 1;
+ } else {
+ dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
+ }
+ dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
+
+ /*
+ * search through runtime history and return first runtime that falls
+ * into the range of predicted bucket.
+ */
+ for (i = 0; i < sched_ravg_hist_size; i++) {
+ if (hist[i] >= dmin && hist[i] < dmax) {
+ ret = hist[i];
+ break;
+ }
+ }
+	/* no historical runtime within bucket found, use the bucket midpoint */
+ if (ret < dmin)
+ ret = (dmin + dmax) / 2;
+ /*
+ * when updating in middle of a window, runtime could be higher
+ * than all recorded history. Always predict at least runtime.
+ */
+ ret = max(runtime, ret);
+out:
+ trace_sched_update_pred_demand(rq, p, runtime,
+ mult_frac((unsigned int)cur_freq_runtime, 100,
+ sched_ravg_window), ret);
+ return ret;
+}
+
+static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+{
+ if (p->ravg.pred_demand >= p->ravg.curr_window)
+ return p->ravg.pred_demand;
+
+ return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+ p->ravg.curr_window);
+}
+
+/*
+ * Predictive demand of a task is calculated at the window roll-over.
+ * If the task's current-window busy time exceeds the predicted
+ * demand, update it here to reflect the task's needs.
+ */
+void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+ u32 new, old;
+
+ if (is_idle_task(p) || exiting_task(p))
+ return;
+
+ if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
+ (!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
+ (event != TASK_MIGRATE &&
+ event != PICK_NEXT_TASK)))
+ return;
+
+ /*
+	 * TASK_UPDATE can be called on a sleeping task, when it's moved between
+	 * related groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
+ return;
+ }
+
+ new = calc_pred_demand(rq, p);
+ old = p->ravg.pred_demand;
+
+ if (old >= new)
+ return;
+
+ if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+ !p->dl.dl_throttled))
+ p->sched_class->fixup_hmp_sched_stats(rq, p,
+ p->ravg.demand,
+ new);
+
+ p->ravg.pred_demand = new;
+}
+
+/*
+ * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+ */
+static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime)
+{
+ int new_window, full_window = 0;
+ int p_is_curr_task = (p == rq->curr);
+ u64 mark_start = p->ravg.mark_start;
+ u64 window_start = rq->window_start;
+ u32 window_size = sched_ravg_window;
+ u64 delta;
+ u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+ u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+ u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ int flip_counters = 0;
+ int prev_sum_reset = 0;
+ bool new_task;
+ struct related_thread_group *grp;
+
+ new_window = mark_start < window_start;
+ if (new_window) {
+ full_window = (window_start - mark_start) >= window_size;
+ if (p->ravg.active_windows < USHRT_MAX)
+ p->ravg.active_windows++;
+ }
+
+ new_task = is_new_task(p);
+
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ /* cpu_time protected by rq_lock */
+ struct group_cpu_time *cpu_time =
+ _group_cpu_time(grp, cpu_of(rq));
+
+ curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+ nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ if (cpu_time->window_start != rq->window_start) {
+ int nr_windows;
+
+ delta = rq->window_start - cpu_time->window_start;
+ nr_windows = div64_u64(delta, window_size);
+ if (nr_windows > 1)
+ prev_sum_reset = 1;
+
+ cpu_time->window_start = rq->window_start;
+ flip_counters = 1;
+ }
+
+ if (p_is_curr_task && new_window) {
+ u64 curr_sum = rq->curr_runnable_sum;
+ u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+
+ if (full_window)
+ curr_sum = nt_curr_sum = 0;
+
+ rq->prev_runnable_sum = curr_sum;
+ rq->nt_prev_runnable_sum = nt_curr_sum;
+
+ rq->curr_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = 0;
+ }
+ } else {
+ if (p_is_curr_task && new_window) {
+ flip_counters = 1;
+ if (full_window)
+ prev_sum_reset = 1;
+ }
+ }
+
+ /*
+ * Handle per-task window rollover. We don't care about the idle
+ * task or exiting tasks.
+ */
+ if (new_window && !is_idle_task(p) && !exiting_task(p)) {
+ u32 curr_window = 0;
+
+ if (!full_window)
+ curr_window = p->ravg.curr_window;
+
+ p->ravg.prev_window = curr_window;
+ p->ravg.curr_window = 0;
+ }
+
+ if (flip_counters) {
+ u64 curr_sum = *curr_runnable_sum;
+ u64 nt_curr_sum = *nt_curr_runnable_sum;
+
+ if (prev_sum_reset)
+ curr_sum = nt_curr_sum = 0;
+
+ *prev_runnable_sum = curr_sum;
+ *nt_prev_runnable_sum = nt_curr_sum;
+
+ *curr_runnable_sum = 0;
+ *nt_curr_runnable_sum = 0;
+ }
+
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
+ /*
+ * account_busy_for_cpu_time() = 0, so no update to the
+ * task's current window needs to be made. This could be
+ * for example
+ *
+ * - a wakeup event on a task within the current
+ * window (!new_window below, no action required),
+ * - switching to a new task from idle (PICK_NEXT_TASK)
+ * in a new window where irqtime is 0 and we aren't
+ * waiting on IO
+ */
+
+ if (!new_window)
+ return;
+
+ /*
+ * A new window has started. The RQ demand must be rolled
+ * over if p is the current task.
+ */
+ if (p_is_curr_task) {
+ /* p is idle task */
+ BUG_ON(p != rq->idle);
+ }
+
+ return;
+ }
+
+ if (!new_window) {
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. No rollover
+ * since we didn't start a new window. An example of this is
+ * when a task starts execution and then sleeps within the
+ * same window.
+ */
+
+ if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
+ delta = wallclock - mark_start;
+ else
+ delta = irqtime;
+ delta = scale_exec_time(delta, rq);
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
+
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.curr_window += delta;
+
+ return;
+ }
+
+ if (!p_is_curr_task) {
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has also started, but p is not the current task, so the
+ * window is not rolled over - just split up and account
+ * as necessary into curr and prev. The window is only
+ * rolled over when a new window is processed for the current
+ * task.
+ *
+ * Irqtime can't be accounted by a task that isn't the
+ * currently running task.
+ */
+
+ if (!full_window) {
+ /*
+ * A full window hasn't elapsed, account partial
+ * contribution to previous completed window.
+ */
+ delta = scale_exec_time(window_start - mark_start, rq);
+ if (!exiting_task(p))
+ p->ravg.prev_window += delta;
+ } else {
+ /*
+ * Since at least one full window has elapsed,
+ * the contribution to the previous window is the
+ * full window (window_size).
+ */
+ delta = scale_exec_time(window_size, rq);
+ if (!exiting_task(p))
+ p->ravg.prev_window = delta;
+ }
+
+ *prev_runnable_sum += delta;
+ if (new_task)
+ *nt_prev_runnable_sum += delta;
+
+ /* Account piece of busy time in the current window. */
+ delta = scale_exec_time(wallclock - window_start, rq);
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
+
+ if (!exiting_task(p))
+ p->ravg.curr_window = delta;
+
+ return;
+ }
+
+ if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has started and p is the current task so rollover is
+ * needed. If any of these three above conditions are true
+ * then this busy time can't be accounted as irqtime.
+ *
+ * Busy time for the idle task or exiting tasks need not
+ * be accounted.
+ *
+ * An example of this would be a task that starts execution
+ * and then sleeps once a new window has begun.
+ */
+
+ if (!full_window) {
+ /*
+ * A full window hasn't elapsed, account partial
+ * contribution to previous completed window.
+ */
+ delta = scale_exec_time(window_start - mark_start, rq);
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.prev_window += delta;
+ } else {
+ /*
+ * Since at least one full window has elapsed,
+ * the contribution to the previous window is the
+ * full window (window_size).
+ */
+ delta = scale_exec_time(window_size, rq);
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.prev_window = delta;
+ }
+
+ /*
+ * Rollover is done here by overwriting the values in
+ * prev_runnable_sum and curr_runnable_sum.
+ */
+ *prev_runnable_sum += delta;
+ if (new_task)
+ *nt_prev_runnable_sum += delta;
+
+ /* Account piece of busy time in the current window. */
+ delta = scale_exec_time(wallclock - window_start, rq);
+ *curr_runnable_sum += delta;
+ if (new_task)
+ *nt_curr_runnable_sum += delta;
+
+ if (!is_idle_task(p) && !exiting_task(p))
+ p->ravg.curr_window = delta;
+
+ return;
+ }
+
+ if (irqtime) {
+ /*
+ * account_busy_for_cpu_time() = 1 so busy time needs
+ * to be accounted to the current window. A new window
+ * has started and p is the current task so rollover is
+ * needed. The current task must be the idle task because
+ * irqtime is not accounted for any other task.
+ *
+ * Irqtime will be accounted each time we process IRQ activity
+ * after a period of idleness, so we know the IRQ busy time
+ * started at wallclock - irqtime.
+ */
+
+ BUG_ON(!is_idle_task(p));
+ mark_start = wallclock - irqtime;
+
+ /*
+ * Roll window over. If IRQ busy time was just in the current
+ * window then that is all that need be accounted.
+ */
+ if (mark_start > window_start) {
+ *curr_runnable_sum = scale_exec_time(irqtime, rq);
+ return;
+ }
+
+ /*
+ * The IRQ busy time spanned multiple windows. Process the
+ * busy time preceding the current window start first.
+ */
+ delta = window_start - mark_start;
+ if (delta > window_size)
+ delta = window_size;
+ delta = scale_exec_time(delta, rq);
+ *prev_runnable_sum += delta;
+
+ /* Process the remaining IRQ busy time in the current window. */
+ delta = wallclock - window_start;
+ rq->curr_runnable_sum = scale_exec_time(delta, rq);
+
+ return;
+ }
+
+ BUG();
+}
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+ struct task_struct *p, u32 runtime) {
+
+ int bidx;
+ u32 pred_demand;
+
+ bidx = busy_to_bucket(runtime);
+ pred_demand = get_pred_busy(rq, p, bidx, runtime);
+ bucket_increase(p->ravg.busy_buckets, bidx);
+
+ return pred_demand;
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+{
+ if (use_cycle_counter)
+ p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+}
+
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime)
+{
+ u64 cur_cycles;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ if (!use_cycle_counter) {
+ rq->cc.cycles = cpu_cur_freq(cpu);
+ rq->cc.time = 1;
+ return;
+ }
+
+ cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+ /*
+	 * If the current task is the idle task and irqtime == 0, the CPU was
+	 * indeed idle and its cycle counter was probably not
+	 * increasing. We still need an estimated CPU frequency
+	 * for IO wait time accounting. Use the previously
+	 * calculated frequency in such a case.
+ */
+ if (!is_idle_task(rq->curr) || irqtime) {
+ if (unlikely(cur_cycles < p->cpu_cycles))
+ rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+ else
+ rq->cc.cycles = cur_cycles - p->cpu_cycles;
+ rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+ if (event == IRQ_UPDATE && is_idle_task(p))
+ /*
+ * Time between mark_start of idle task and IRQ handler
+ * entry time is CPU cycle counter stall period.
+ * Upon IRQ handler entry sched_account_irqstart()
+ * replenishes idle task's cpu cycle counter so
+ * rq->cc.cycles now represents increased cycles during
+ * IRQ handler rather than time between idle entry and
+ * IRQ exit. Thus use irqtime as time delta.
+ */
+ rq->cc.time = irqtime;
+ else
+ rq->cc.time = wallclock - p->ravg.mark_start;
+ BUG_ON((s64)rq->cc.time < 0);
+ }
+
+ p->cpu_cycles = cur_cycles;
+
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+}
+
+static int account_busy_for_task_demand(struct task_struct *p, int event)
+{
+ /*
+ * No need to bother updating task demand for exiting tasks
+ * or the idle task.
+ */
+ if (exiting_task(p) || is_idle_task(p))
+ return 0;
+
+ /*
+ * When a task is waking up it is completing a segment of non-busy
+ * time. Likewise, if wait time is not treated as busy time, then
+ * when a task begins to run or is migrated, it is not running and
+ * is completing a segment of non-busy time.
+ */
+ if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
+ (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Called when new window is starting for a task, to record cpu usage over
+ * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
+ * when, say, a real-time task runs without preemption for several windows at a
+ * stretch.
+ */
+static void update_history(struct rq *rq, struct task_struct *p,
+ u32 runtime, int samples, int event)
+{
+ u32 *hist = &p->ravg.sum_history[0];
+ int ridx, widx;
+ u32 max = 0, avg, demand, pred_demand;
+ u64 sum = 0;
+
+ /* Ignore windows where task had no activity */
+ if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+ goto done;
+
+ /* Push new 'runtime' value onto stack */
+ widx = sched_ravg_hist_size - 1;
+ ridx = widx - samples;
+ for (; ridx >= 0; --widx, --ridx) {
+ hist[widx] = hist[ridx];
+ sum += hist[widx];
+ if (hist[widx] > max)
+ max = hist[widx];
+ }
+
+ for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
+ hist[widx] = runtime;
+ sum += hist[widx];
+ if (hist[widx] > max)
+ max = hist[widx];
+ }
+
+ p->ravg.sum = 0;
+
+ if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+ demand = runtime;
+ } else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+ demand = max;
+ } else {
+ avg = div64_u64(sum, sched_ravg_hist_size);
+ if (sched_window_stats_policy == WINDOW_STATS_AVG)
+ demand = avg;
+ else
+ demand = max(avg, runtime);
+ }
+ pred_demand = predict_and_update_buckets(rq, p, runtime);
+
+ /*
+ * A throttled deadline sched class task gets dequeued without
+	 * changing p->on_rq. Since the dequeue already decrements the hmp stats,
+	 * avoid decrementing them here again.
+ */
+ if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+ !p->dl.dl_throttled))
+ p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
+ pred_demand);
+
+ p->ravg.demand = demand;
+ p->ravg.pred_demand = pred_demand;
+
+done:
+ trace_sched_update_history(rq, p, runtime, samples, event);
+}
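+
+/*
+ * Example (illustrative history of {3, 5, 2, 4, 1} ms with the latest
+ * runtime being 3ms): WINDOW_STATS_RECENT reports 3ms, WINDOW_STATS_MAX
+ * reports 5ms, WINDOW_STATS_AVG reports 3ms and the default
+ * WINDOW_STATS_MAX_RECENT_AVG reports max(avg, runtime) = 3ms.
+ */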
+
+static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+{
+ delta = scale_exec_time(delta, rq);
+ p->ravg.sum += delta;
+ if (unlikely(p->ravg.sum > sched_ravg_window))
+ p->ravg.sum = sched_ravg_window;
+}
+
+/*
+ * Account cpu demand of task and/or update task's cpu demand history
+ *
+ * ms = p->ravg.mark_start;
+ * wc = wallclock
+ * ws = rq->window_start
+ *
+ * Three possibilities:
+ *
+ * a) Task event is contained within one window.
+ * window_start < mark_start < wallclock
+ *
+ * ws ms wc
+ * | | |
+ * V V V
+ * |---------------|
+ *
+ * In this case, p->ravg.sum is updated *iff* event is appropriate
+ * (ex: event == PUT_PREV_TASK)
+ *
+ * b) Task event spans two windows.
+ * mark_start < window_start < wallclock
+ *
+ * ms ws wc
+ * | | |
+ * V V V
+ * -----|-------------------
+ *
+ * In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+ * is appropriate, then a new window sample is recorded followed
+ * by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+ *
+ * c) Task event spans more than two windows.
+ *
+ * ms ws_tmp ws wc
+ * | | | |
+ * V V V V
+ * ---|-------|-------|-------|-------|------
+ * | |
+ * |<------ nr_full_windows ------>|
+ *
+ * In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
+ * event is appropriate, a window sample of p->ravg.sum is recorded,
+ * 'nr_full_windows' samples of window_size are also recorded *iff*
+ * event is appropriate and finally p->ravg.sum is set to (wc - ws)
+ * *iff* event is appropriate.
+ *
+ * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
+ * depends on it!
+ */
+static void update_task_demand(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock)
+{
+ u64 mark_start = p->ravg.mark_start;
+ u64 delta, window_start = rq->window_start;
+ int new_window, nr_full_windows;
+ u32 window_size = sched_ravg_window;
+
+ new_window = mark_start < window_start;
+ if (!account_busy_for_task_demand(p, event)) {
+ if (new_window)
+ /*
+			 * If this time isn't being accounted as
+ * busy time, and a new window started, only the
+ * previous window need be closed out with the
+ * pre-existing demand. Multiple windows may have
+ * elapsed, but since empty windows are dropped,
+ * it is not necessary to account those.
+ */
+ update_history(rq, p, p->ravg.sum, 1, event);
+ return;
+ }
+
+ if (!new_window) {
+ /*
+ * The simple case - busy time contained within the existing
+ * window.
+ */
+ add_to_task_demand(rq, p, wallclock - mark_start);
+ return;
+ }
+
+ /*
+ * Busy time spans at least two windows. Temporarily rewind
+ * window_start to first window boundary after mark_start.
+ */
+ delta = window_start - mark_start;
+ nr_full_windows = div64_u64(delta, window_size);
+ window_start -= (u64)nr_full_windows * (u64)window_size;
+
+ /* Process (window_start - mark_start) first */
+ add_to_task_demand(rq, p, window_start - mark_start);
+
+ /* Push new sample(s) into task's demand history */
+ update_history(rq, p, p->ravg.sum, 1, event);
+ if (nr_full_windows)
+ update_history(rq, p, scale_exec_time(window_size, rq),
+ nr_full_windows, event);
+
+ /*
+ * Roll window_start back to current to process any remainder
+ * in current window.
+ */
+ window_start += (u64)nr_full_windows * (u64)window_size;
+
+ /* Process (wallclock - window_start) next */
+ mark_start = window_start;
+ add_to_task_demand(rq, p, wallclock - mark_start);
+}
+
+/* Reflect task activity on its demand and cpu's busy time statistics */
+void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime)
+{
+ if (!rq->window_start || sched_disable_window_stats)
+ return;
+
+ lockdep_assert_held(&rq->lock);
+
+ update_window_start(rq, wallclock);
+
+ if (!p->ravg.mark_start) {
+ update_task_cpu_cycles(p, cpu_of(rq));
+ goto done;
+ }
+
+ update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+ update_task_demand(p, rq, event, wallclock);
+ update_cpu_busy_time(p, rq, event, wallclock, irqtime);
+ update_task_pred_demand(rq, p, event);
+done:
+ trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
+ rq->cc.cycles, rq->cc.time,
+ _group_cpu_time(p->grp, cpu_of(rq)));
+
+ p->ravg.mark_start = wallclock;
+}
+
+void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags, nr_windows;
+ u64 cur_jiffies_ts;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+
+ /*
+ * cputime (wallclock) uses sched_clock so use the same here for
+ * consistency.
+ */
+ delta += sched_clock() - wallclock;
+ cur_jiffies_ts = get_jiffies_64();
+
+ if (is_idle_task(curr))
+ update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
+ delta);
+
+ nr_windows = cur_jiffies_ts - rq->irqload_ts;
+
+ if (nr_windows) {
+ if (nr_windows < 10) {
+ /* Decay CPU's irqload by 3/4 for each window. */
+ rq->avg_irqload *= (3 * nr_windows);
+ rq->avg_irqload = div64_u64(rq->avg_irqload,
+ 4 * nr_windows);
+ } else {
+ rq->avg_irqload = 0;
+ }
+ rq->avg_irqload += rq->cur_irqload;
+ rq->cur_irqload = 0;
+ }
+
+ rq->cur_irqload += delta;
+ rq->irqload_ts = cur_jiffies_ts;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!rq->window_start || sched_disable_window_stats)
+ return;
+
+ if (is_idle_task(curr)) {
+ /* We're here without rq->lock held, IRQ disabled */
+ raw_spin_lock(&rq->lock);
+ update_task_cpu_cycles(curr, cpu);
+ raw_spin_unlock(&rq->lock);
+ }
+}
+
+void reset_task_stats(struct task_struct *p)
+{
+ u32 sum = 0;
+
+ if (exiting_task(p))
+ sum = EXITING_TASK_MARKER;
+
+ memset(&p->ravg, 0, sizeof(struct ravg));
+ /* Retain EXITING_TASK marker */
+ p->ravg.sum_history[0] = sum;
+}
+
+void mark_task_starting(struct task_struct *p)
+{
+ u64 wallclock;
+ struct rq *rq = task_rq(p);
+
+ if (!rq->window_start || sched_disable_window_stats) {
+ reset_task_stats(p);
+ return;
+ }
+
+ wallclock = sched_ktime_clock();
+ p->ravg.mark_start = p->last_wake_ts = wallclock;
+ p->last_cpu_selected_ts = wallclock;
+ p->last_switch_out_ts = 0;
+ update_task_cpu_cycles(p, cpu_of(rq));
+}
+
+void set_window_start(struct rq *rq)
+{
+ int cpu = cpu_of(rq);
+ struct rq *sync_rq = cpu_rq(sync_cpu);
+
+ if (rq->window_start || !sched_enable_hmp)
+ return;
+
+ if (cpu == sync_cpu) {
+ rq->window_start = sched_ktime_clock();
+ } else {
+ raw_spin_unlock(&rq->lock);
+ double_rq_lock(rq, sync_rq);
+ rq->window_start = cpu_rq(sync_cpu)->window_start;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ raw_spin_unlock(&sync_rq->lock);
+ }
+
+ rq->curr->ravg.mark_start = rq->window_start;
+}
+
+void migrate_sync_cpu(int cpu)
+{
+ if (cpu == sync_cpu)
+ sync_cpu = smp_processor_id();
+}
+
+static void reset_all_task_stats(void)
+{
+ struct task_struct *g, *p;
+
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ reset_task_stats(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+}
+
+static void disable_window_stats(void)
+{
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ for_each_possible_cpu(i)
+ raw_spin_lock(&cpu_rq(i)->lock);
+
+ sched_disable_window_stats = 1;
+
+ for_each_possible_cpu(i)
+ raw_spin_unlock(&cpu_rq(i)->lock);
+
+ local_irq_restore(flags);
+}
+
+/* Called with all cpu's rq->lock held */
+static void enable_window_stats(void)
+{
+ sched_disable_window_stats = 0;
+
+}
+
+enum reset_reason_code {
+ WINDOW_CHANGE,
+ POLICY_CHANGE,
+ HIST_SIZE_CHANGE,
+ FREQ_AGGREGATE_CHANGE,
+};
+
+const char *sched_window_reset_reasons[] = {
+ "WINDOW_CHANGE",
+ "POLICY_CHANGE",
+	"HIST_SIZE_CHANGE",
+	"FREQ_AGGREGATE_CHANGE",
+};
+
+/* Called with IRQs enabled */
+void reset_all_window_stats(u64 window_start, unsigned int window_size)
+{
+ int cpu;
+ unsigned long flags;
+ u64 start_ts = sched_ktime_clock();
+ int reason = WINDOW_CHANGE;
+ unsigned int old = 0, new = 0;
+ struct related_thread_group *grp;
+
+ disable_window_stats();
+
+ reset_all_task_stats();
+
+ local_irq_save(flags);
+
+ read_lock(&related_thread_group_lock);
+
+ for_each_possible_cpu(cpu)
+ raw_spin_lock(&cpu_rq(cpu)->lock);
+
+ list_for_each_entry(grp, &related_thread_groups, list) {
+ int j;
+
+ for_each_possible_cpu(j) {
+ struct group_cpu_time *cpu_time;
+ /* Protected by rq lock */
+ cpu_time = _group_cpu_time(grp, j);
+ memset(cpu_time, 0, sizeof(struct group_cpu_time));
+ if (window_start)
+ cpu_time->window_start = window_start;
+ }
+ }
+
+ if (window_size) {
+ sched_ravg_window = window_size * TICK_NSEC;
+ set_hmp_defaults();
+ }
+
+ enable_window_stats();
+
+ for_each_possible_cpu(cpu) {
+ struct rq *rq = cpu_rq(cpu);
+
+ if (window_start)
+ rq->window_start = window_start;
+ rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ reset_cpu_hmp_stats(cpu, 1);
+ }
+
+ if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
+ reason = POLICY_CHANGE;
+ old = sched_window_stats_policy;
+ new = sysctl_sched_window_stats_policy;
+ sched_window_stats_policy = sysctl_sched_window_stats_policy;
+ } else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
+ reason = HIST_SIZE_CHANGE;
+ old = sched_ravg_hist_size;
+ new = sysctl_sched_ravg_hist_size;
+ sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
+ } else if (sched_freq_aggregate !=
+ sysctl_sched_freq_aggregate) {
+ reason = FREQ_AGGREGATE_CHANGE;
+ old = sched_freq_aggregate;
+ new = sysctl_sched_freq_aggregate;
+ sched_freq_aggregate = sysctl_sched_freq_aggregate;
+ }
+
+ for_each_possible_cpu(cpu)
+ raw_spin_unlock(&cpu_rq(cpu)->lock);
+
+ read_unlock(&related_thread_group_lock);
+
+ local_irq_restore(flags);
+
+ trace_sched_reset_all_window_stats(window_start, window_size,
+ sched_ktime_clock() - start_ts, reason, old, new);
+}
+
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
+
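+/*
+ * Report per-CPU busy time (previous-window, new-task and predicted load, in
+ * usecs) for the CPUs in @query_cpus, scaled for consumption by the cpufreq
+ * governor.
+ */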
+void sched_get_cpus_busy(struct sched_load *busy,
+ const struct cpumask *query_cpus)
+{
+ unsigned long flags;
+ struct rq *rq;
+ const int cpus = cpumask_weight(query_cpus);
+ u64 load[cpus], group_load[cpus];
+ u64 nload[cpus], ngload[cpus];
+ u64 pload[cpus];
+ unsigned int cur_freq[cpus], max_freq[cpus];
+ int notifier_sent = 0;
+ int early_detection[cpus];
+ int cpu, i = 0;
+ unsigned int window_size;
+ u64 max_prev_sum = 0;
+ int max_busy_cpu = cpumask_first(query_cpus);
+ struct related_thread_group *grp;
+ u64 total_group_load = 0, total_ngload = 0;
+ bool aggregate_load = false;
+
+ if (unlikely(cpus == 0))
+ return;
+
+ /*
+ * This function could be called in timer context, and the
+ * current task may have been executing for a long time. Ensure
+ * that the window stats are current by doing an update.
+ */
+ read_lock(&related_thread_group_lock);
+
+ local_irq_save(flags);
+ for_each_cpu(cpu, query_cpus)
+ raw_spin_lock(&cpu_rq(cpu)->lock);
+
+ window_size = sched_ravg_window;
+
+ for_each_cpu(cpu, query_cpus) {
+ rq = cpu_rq(cpu);
+
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
+ 0);
+ cur_freq[i] = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+
+ load[i] = rq->old_busy_time = rq->prev_runnable_sum;
+ nload[i] = rq->nt_prev_runnable_sum;
+ pload[i] = rq->hmp_stats.pred_demands_sum;
+ rq->old_estimated_time = pload[i];
+
+ if (load[i] > max_prev_sum) {
+ max_prev_sum = load[i];
+ max_busy_cpu = cpu;
+ }
+
+ /*
+ * sched_get_cpus_busy() is called for all CPUs in a
+ * frequency domain. So the notifier_sent flag per
+ * cluster works even when a frequency domain spans
+ * more than 1 cluster.
+ */
+ if (rq->cluster->notifier_sent) {
+ notifier_sent = 1;
+ rq->cluster->notifier_sent = 0;
+ }
+ early_detection[i] = (rq->ed_task != NULL);
+ cur_freq[i] = cpu_cur_freq(cpu);
+ max_freq[i] = cpu_max_freq(cpu);
+ i++;
+ }
+
+ for_each_related_thread_group(grp) {
+ for_each_cpu(cpu, query_cpus) {
+ /* Protected by rq_lock */
+ struct group_cpu_time *cpu_time =
+ _group_cpu_time(grp, cpu);
+ sync_window_start(cpu_rq(cpu), cpu_time);
+ }
+ }
+
+ group_load_in_freq_domain(
+ &cpu_rq(max_busy_cpu)->freq_domain_cpumask,
+ &total_group_load, &total_ngload);
+ aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
+
+ i = 0;
+ for_each_cpu(cpu, query_cpus) {
+ group_load[i] = 0;
+ ngload[i] = 0;
+
+ if (early_detection[i])
+ goto skip_early;
+
+ rq = cpu_rq(cpu);
+ if (aggregate_load) {
+ if (cpu == max_busy_cpu) {
+ group_load[i] = total_group_load;
+ ngload[i] = total_ngload;
+ }
+ } else {
+ _group_load_in_cpu(cpu, &group_load[i], &ngload[i]);
+ }
+
+ load[i] += group_load[i];
+ nload[i] += ngload[i];
+ /*
+ * Scale load in reference to cluster max_possible_freq.
+ *
+ * Note that scale_load_to_cpu() scales load in reference to
+ * the cluster max_freq.
+ */
+ load[i] = scale_load_to_cpu(load[i], cpu);
+ nload[i] = scale_load_to_cpu(nload[i], cpu);
+ pload[i] = scale_load_to_cpu(pload[i], cpu);
+skip_early:
+ i++;
+ }
+
+ for_each_cpu(cpu, query_cpus)
+ raw_spin_unlock(&(cpu_rq(cpu))->lock);
+ local_irq_restore(flags);
+
+ read_unlock(&related_thread_group_lock);
+
+ i = 0;
+ for_each_cpu(cpu, query_cpus) {
+ rq = cpu_rq(cpu);
+
+ if (early_detection[i]) {
+ busy[i].prev_load = div64_u64(sched_ravg_window,
+ NSEC_PER_USEC);
+ busy[i].new_task_load = 0;
+ goto exit_early;
+ }
+
+ /*
+ * When load aggregation is controlled by
+ * sched_freq_aggregate_threshold, allow reporting loads
+ * greater than 100% at Fcur to ramp up the frequency
+ * faster.
+ */
+ if (notifier_sent || (aggregate_load &&
+ sched_freq_aggregate_threshold)) {
+ load[i] = scale_load_to_freq(load[i], max_freq[i],
+ cpu_max_possible_freq(cpu));
+ nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+ cpu_max_possible_freq(cpu));
+ } else {
+ load[i] = scale_load_to_freq(load[i], max_freq[i],
+ cur_freq[i]);
+ nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+ cur_freq[i]);
+ if (load[i] > window_size)
+ load[i] = window_size;
+ if (nload[i] > window_size)
+ nload[i] = window_size;
+
+ load[i] = scale_load_to_freq(load[i], cur_freq[i],
+ cpu_max_possible_freq(cpu));
+ nload[i] = scale_load_to_freq(nload[i], cur_freq[i],
+ cpu_max_possible_freq(cpu));
+ }
+ pload[i] = scale_load_to_freq(pload[i], max_freq[i],
+ rq->cluster->max_possible_freq);
+
+ busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
+ busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
+ busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
+
+exit_early:
+ trace_sched_get_busy(cpu, busy[i].prev_load,
+ busy[i].new_task_load,
+ busy[i].predicted_load,
+ early_detection[i]);
+ i++;
+ }
+}
+
+void sched_set_io_is_busy(int val)
+{
+ sched_io_is_busy = val;
+}
+
+int sched_set_window(u64 window_start, unsigned int window_size)
+{
+ u64 now, cur_jiffies, jiffy_ktime_ns;
+ s64 ws;
+ unsigned long flags;
+
+ if (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)
+ return -EINVAL;
+
+ mutex_lock(&policy_mutex);
+
+ /*
+ * Get a consistent view of ktime, jiffies, and the time
+ * since the last jiffy (based on last_jiffies_update).
+ */
+ local_irq_save(flags);
+ cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
+ local_irq_restore(flags);
+
+ /* translate window_start from jiffies to nanoseconds */
+ ws = (window_start - cur_jiffies); /* jiffy difference */
+ ws *= TICK_NSEC;
+ ws += jiffy_ktime_ns;
+
+ /*
+ * Roll back calculated window start so that it is in
+ * the past (window stats must have a current window).
+ */
+ while (ws > now)
+ ws -= (window_size * TICK_NSEC);
+
+ BUG_ON(sched_ktime_clock() < ws);
+
+ reset_all_window_stats(ws, window_size);
+
+ sched_update_freq_max_load(cpu_possible_mask);
+
+ mutex_unlock(&policy_mutex);
+
+ return 0;
+}
+
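+/*
+ * On task migration, move the task's current- and previous-window
+ * contributions from the source CPU's (or group's) runnable sums to the
+ * destination's.
+ */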
+void fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+ struct rq *src_rq = task_rq(p);
+ struct rq *dest_rq = cpu_rq(new_cpu);
+ u64 wallclock;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ int migrate_type;
+ struct migration_sum_data d;
+ bool new_task;
+ struct related_thread_group *grp;
+
+ if (!sched_enable_hmp || (!p->on_rq && p->state != TASK_WAKING))
+ return;
+
+ if (exiting_task(p)) {
+ clear_ed_task(p, src_rq);
+ return;
+ }
+
+ if (p->state == TASK_WAKING)
+ double_rq_lock(src_rq, dest_rq);
+
+ if (sched_disable_window_stats)
+ goto done;
+
+ wallclock = sched_ktime_clock();
+
+ update_task_ravg(task_rq(p)->curr, task_rq(p),
+ TASK_UPDATE,
+ wallclock, 0);
+ update_task_ravg(dest_rq->curr, dest_rq,
+ TASK_UPDATE, wallclock, 0);
+
+ update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+ wallclock, 0);
+
+ update_task_cpu_cycles(p, new_cpu);
+
+ new_task = is_new_task(p);
+ /* Protected by rq_lock */
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ struct group_cpu_time *cpu_time;
+
+ migrate_type = GROUP_TO_GROUP;
+ /* Protected by rq_lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
+ d.src_rq = NULL;
+ d.src_cpu_time = cpu_time;
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ /* Protected by rq_lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
+ d.dst_rq = NULL;
+ d.dst_cpu_time = cpu_time;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ sync_window_start(dest_rq, cpu_time);
+ } else {
+ migrate_type = RQ_TO_RQ;
+ d.src_rq = src_rq;
+ d.src_cpu_time = NULL;
+ d.dst_rq = dest_rq;
+ d.dst_cpu_time = NULL;
+ src_curr_runnable_sum = &src_rq->curr_runnable_sum;
+ src_prev_runnable_sum = &src_rq->prev_runnable_sum;
+ src_nt_curr_runnable_sum = &src_rq->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &src_rq->nt_prev_runnable_sum;
+
+ dst_curr_runnable_sum = &dest_rq->curr_runnable_sum;
+ dst_prev_runnable_sum = &dest_rq->prev_runnable_sum;
+ dst_nt_curr_runnable_sum = &dest_rq->nt_curr_runnable_sum;
+ dst_nt_prev_runnable_sum = &dest_rq->nt_prev_runnable_sum;
+ }
+
+ if (p->ravg.curr_window) {
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+ if (new_task) {
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum += p->ravg.curr_window;
+ }
+ }
+
+ if (p->ravg.prev_window) {
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+ if (new_task) {
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum += p->ravg.prev_window;
+ }
+ }
+
+ if (p == src_rq->ed_task) {
+ src_rq->ed_task = NULL;
+ if (!dest_rq->ed_task)
+ dest_rq->ed_task = p;
+ }
+
+ trace_sched_migration_update_sum(p, migrate_type, &d);
+ BUG_ON((s64)*src_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+
+done:
+ if (p->state == TASK_WAKING)
+ double_rq_unlock(src_rq, dest_rq);
+}
+
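+/*
+ * Rescale the up/down migration thresholds when a lower-capacity cluster's
+ * usable fmax differs from its hardware fmax, so that frequency limits do
+ * not skew big-task classification.
+ */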
+#define sched_up_down_migrate_auto_update 1
+static void check_for_up_down_migrate_update(const struct cpumask *cpus)
+{
+ int i = cpumask_first(cpus);
+
+ if (!sched_up_down_migrate_auto_update)
+ return;
+
+ if (cpu_max_possible_capacity(i) == max_possible_capacity)
+ return;
+
+ if (cpu_max_possible_freq(i) == cpu_max_freq(i))
+ up_down_migrate_scale_factor = 1024;
+ else
+ up_down_migrate_scale_factor = (1024 *
+ cpu_max_possible_freq(i)) / cpu_max_freq(i);
+
+ update_up_down_migrate();
+}
+
+/* Return cluster which can offer required capacity for group */
+static struct sched_cluster *
+best_cluster(struct related_thread_group *grp, u64 total_demand)
+{
+ struct sched_cluster *cluster = NULL;
+
+ for_each_sched_cluster(cluster) {
+ if (group_will_fit(cluster, grp, total_demand))
+ return cluster;
+ }
+
+ return NULL;
+}
+
+static void _set_preferred_cluster(struct related_thread_group *grp)
+{
+ struct task_struct *p;
+ u64 combined_demand = 0;
+
+ if (!sysctl_sched_enable_colocation) {
+ grp->last_update = sched_ktime_clock();
+ grp->preferred_cluster = NULL;
+ return;
+ }
+
+ /*
+ * Wakeups of two or more related tasks can race with each other and
+ * result in multiple calls to _set_preferred_cluster being issued at
+ * the same time. Avoid the overhead of rechecking the preferred
+ * cluster in such cases.
+ */
+ if (sched_ktime_clock() - grp->last_update < sched_ravg_window / 10)
+ return;
+
+ list_for_each_entry(p, &grp->tasks, grp_list)
+ combined_demand += p->ravg.demand;
+
+ grp->preferred_cluster = best_cluster(grp, combined_demand);
+ grp->last_update = sched_ktime_clock();
+ trace_sched_set_preferred_cluster(grp, combined_demand);
+}
+
+void set_preferred_cluster(struct related_thread_group *grp)
+{
+ raw_spin_lock(&grp->lock);
+ _set_preferred_cluster(grp);
+ raw_spin_unlock(&grp->lock);
+}
+
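+/* Events passed to transfer_busy_time() when a task joins or leaves a group */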
+#define ADD_TASK 0
+#define REM_TASK 1
+
+static inline void free_group_cputime(struct related_thread_group *grp)
+{
+ free_percpu(grp->cpu_time);
+}
+
+static int alloc_group_cputime(struct related_thread_group *grp)
+{
+ int i;
+ struct group_cpu_time *cpu_time;
+ int cpu = raw_smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ u64 window_start = rq->window_start;
+
+ grp->cpu_time = alloc_percpu(struct group_cpu_time);
+ if (!grp->cpu_time)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ cpu_time = per_cpu_ptr(grp->cpu_time, i);
+ memset(cpu_time, 0, sizeof(struct group_cpu_time));
+ cpu_time->window_start = window_start;
+ }
+
+ return 0;
+}
+
+/*
+ * A group's window_start may lag behind the rq's. When moving it forward,
+ * flip the prev/curr counters; when moving forward by more than one window,
+ * the prev counter is set to 0.
+ */
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time)
+{
+ u64 delta;
+ int nr_windows;
+ u64 curr_sum = cpu_time->curr_runnable_sum;
+ u64 nt_curr_sum = cpu_time->nt_curr_runnable_sum;
+
+ delta = rq->window_start - cpu_time->window_start;
+ if (!delta)
+ return;
+
+ nr_windows = div64_u64(delta, sched_ravg_window);
+ if (nr_windows > 1)
+ curr_sum = nt_curr_sum = 0;
+
+ cpu_time->prev_runnable_sum = curr_sum;
+ cpu_time->curr_runnable_sum = 0;
+
+ cpu_time->nt_prev_runnable_sum = nt_curr_sum;
+ cpu_time->nt_curr_runnable_sum = 0;
+
+ cpu_time->window_start = rq->window_start;
+}
+
+/*
+ * Task's cpu usage is accounted in:
+ * rq->curr/prev_runnable_sum, when its ->grp is NULL
+ * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer task's cpu usage between those counters when transitioning between
+ * groups
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+ struct task_struct *p, int event)
+{
+ u64 wallclock;
+ struct group_cpu_time *cpu_time;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ struct migration_sum_data d;
+ int migrate_type;
+
+ if (!sched_freq_aggregate)
+ return;
+
+ wallclock = sched_ktime_clock();
+
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+
+ /* cpu_time is protected by related_thread_group_lock, grp->lock and rq->lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(rq));
+ if (event == ADD_TASK) {
+ sync_window_start(rq, cpu_time);
+ migrate_type = RQ_TO_GROUP;
+ d.src_rq = rq;
+ d.src_cpu_time = NULL;
+ d.dst_rq = NULL;
+ d.dst_cpu_time = cpu_time;
+ src_curr_runnable_sum = &rq->curr_runnable_sum;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &rq->prev_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ } else {
+ migrate_type = GROUP_TO_RQ;
+ d.src_rq = NULL;
+ d.src_cpu_time = cpu_time;
+ d.dst_rq = rq;
+ d.dst_cpu_time = NULL;
+
+ /*
+ * In the REM_TASK case, cpu_time->window_start is already up to
+ * date because of the update_task_ravg() calls above on the
+ * moving task, so there is no need for sync_window_start().
+ */
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_curr_runnable_sum = &rq->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ }
+
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+
+ if (is_new_task(p)) {
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum += p->ravg.curr_window;
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum += p->ravg.prev_window;
+ }
+
+ trace_sched_migration_update_sum(p, migrate_type, &d);
+
+ BUG_ON((s64)*src_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_prev_runnable_sum < 0);
+}
+
+static inline struct group_cpu_time *
+task_group_cpu_time(struct task_struct *p, int cpu)
+{
+ return _group_cpu_time(rcu_dereference(p->grp), cpu);
+}
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu)
+{
+ return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
+}
+
+struct related_thread_group *alloc_related_thread_group(int group_id)
+{
+ struct related_thread_group *grp;
+
+ grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+ if (!grp)
+ return ERR_PTR(-ENOMEM);
+
+ if (alloc_group_cputime(grp)) {
+ kfree(grp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ grp->id = group_id;
+ INIT_LIST_HEAD(&grp->tasks);
+ INIT_LIST_HEAD(&grp->list);
+ raw_spin_lock_init(&grp->lock);
+
+ return grp;
+}
+
+struct related_thread_group *lookup_related_thread_group(unsigned int group_id)
+{
+ struct related_thread_group *grp;
+
+ list_for_each_entry(grp, &related_thread_groups, list) {
+ if (grp->id == group_id)
+ return grp;
+ }
+
+ return NULL;
+}
+
+/* See comments before preferred_cluster() */
+static void free_related_thread_group(struct rcu_head *rcu)
+{
+ struct related_thread_group *grp = container_of(rcu, struct
+ related_thread_group, rcu);
+
+ free_group_cputime(grp);
+ kfree(grp);
+}
+
+static void remove_task_from_group(struct task_struct *p)
+{
+ struct related_thread_group *grp = p->grp;
+ struct rq *rq;
+ int empty_group = 1;
+
+ raw_spin_lock(&grp->lock);
+
+ rq = __task_rq_lock(p);
+ transfer_busy_time(rq, p->grp, p, REM_TASK);
+ list_del_init(&p->grp_list);
+ rcu_assign_pointer(p->grp, NULL);
+ __task_rq_unlock(rq);
+
+ if (!list_empty(&grp->tasks)) {
+ empty_group = 0;
+ _set_preferred_cluster(grp);
+ }
+
+ raw_spin_unlock(&grp->lock);
+
+ if (empty_group) {
+ list_del(&grp->list);
+ call_rcu(&grp->rcu, free_related_thread_group);
+ }
+}
+
+static int
+add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
+{
+ struct rq *rq;
+
+ raw_spin_lock(&grp->lock);
+
+ /*
+ * Change p->grp under rq->lock. This prevents races with read-side
+ * references to p->grp in various hot paths.
+ */
+ rq = __task_rq_lock(p);
+ transfer_busy_time(rq, grp, p, ADD_TASK);
+ list_add(&p->grp_list, &grp->tasks);
+ rcu_assign_pointer(p->grp, grp);
+ __task_rq_unlock(rq);
+
+ _set_preferred_cluster(grp);
+
+ raw_spin_unlock(&grp->lock);
+
+ return 0;
+}
+
+void add_new_task_to_grp(struct task_struct *new)
+{
+ unsigned long flags;
+ struct related_thread_group *grp;
+ struct task_struct *parent;
+
+ if (!sysctl_sched_enable_thread_grouping)
+ return;
+
+ if (thread_group_leader(new))
+ return;
+
+ parent = new->group_leader;
+
+ /*
+ * The parent's pi_lock is required here to protect against a race
+ * with the parent task being removed from the group.
+ */
+ raw_spin_lock_irqsave(&parent->pi_lock, flags);
+
+ /* protected by pi_lock. */
+ grp = task_related_thread_group(parent);
+ if (!grp) {
+ raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+ return;
+ }
+ raw_spin_lock(&grp->lock);
+
+ rcu_assign_pointer(new->grp, grp);
+ list_add(&new->grp_list, &grp->tasks);
+
+ raw_spin_unlock(&grp->lock);
+ raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+}
+
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+ int rc = 0, destroy = 0;
+ unsigned long flags;
+ struct related_thread_group *grp = NULL, *new = NULL;
+
+redo:
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+
+ if ((current != p && p->flags & PF_EXITING) ||
+ (!p->grp && !group_id) ||
+ (p->grp && p->grp->id == group_id))
+ goto done;
+
+ write_lock(&related_thread_group_lock);
+
+ if (!group_id) {
+ remove_task_from_group(p);
+ write_unlock(&related_thread_group_lock);
+ goto done;
+ }
+
+ if (p->grp && p->grp->id != group_id)
+ remove_task_from_group(p);
+
+ grp = lookup_related_thread_group(group_id);
+ if (!grp && !new) {
+ /* New group */
+ write_unlock(&related_thread_group_lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ new = alloc_related_thread_group(group_id);
+ if (IS_ERR(new))
+ return -ENOMEM;
+ destroy = 1;
+ /* Rerun checks (like task exiting), since we dropped pi_lock */
+ goto redo;
+ } else if (!grp && new) {
+ /* New group - use object allocated before */
+ destroy = 0;
+ list_add(&new->list, &related_thread_groups);
+ grp = new;
+ }
+
+ BUG_ON(!grp);
+ rc = add_task_to_group(p, grp);
+ write_unlock(&related_thread_group_lock);
+done:
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ if (new && destroy) {
+ free_group_cputime(new);
+ kfree(new);
+ }
+
+ return rc;
+}
+
+unsigned int sched_get_group_id(struct task_struct *p)
+{
+ unsigned int group_id;
+ struct related_thread_group *grp;
+
+ rcu_read_lock();
+ grp = task_related_thread_group(p);
+ group_id = grp ? grp->id : 0;
+ rcu_read_unlock();
+
+ return group_id;
+}
+
+static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+ int i;
+ struct sched_cluster *cluster;
+ struct cpumask cpumask;
+
+ cpumask_copy(&cpumask, cpus);
+ pre_big_task_count_change(cpu_possible_mask);
+
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+ cluster->capacity = compute_capacity(cluster);
+ cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+ /* 'cpus' can span more than one cluster */
+ check_for_up_down_migrate_update(&cluster->cpus);
+ }
+
+ __update_min_max_capacity();
+
+ post_big_task_count_change(cpu_possible_mask);
+}
+
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+ struct cpumask cpumask;
+ struct sched_cluster *cluster;
+ int i, update_capacity = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
+ cpumask_copy(&cpumask, cpus);
+ for_each_cpu(i, &cpumask) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+ update_capacity += (cluster->max_mitigated_freq != fmax);
+ cluster->max_mitigated_freq = fmax;
+ }
+ spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
+
+ if (update_capacity)
+ update_cpu_cluster_capacity(cpus);
+}
+
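+/*
+ * cpufreq policy notifier: track each cluster's min/max/cur frequency limits
+ * and recompute cluster capacities when those limits change.
+ */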
+static int cpufreq_notifier_policy(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+ struct sched_cluster *cluster = NULL;
+ struct cpumask policy_cluster = *policy->related_cpus;
+ unsigned int orig_max_freq = 0;
+ int i, j, update_capacity = 0;
+
+ if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
+ val != CPUFREQ_CREATE_POLICY)
+ return 0;
+
+ if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
+ update_min_max_capacity();
+ return 0;
+ }
+
+ max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
+ if (min_max_freq == 1)
+ min_max_freq = UINT_MAX;
+ min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
+ BUG_ON(!min_max_freq);
+ BUG_ON(!policy->max);
+
+ for_each_cpu(i, &policy_cluster) {
+ cluster = cpu_rq(i)->cluster;
+ cpumask_andnot(&policy_cluster, &policy_cluster,
+ &cluster->cpus);
+
+ orig_max_freq = cluster->max_freq;
+ cluster->min_freq = policy->min;
+ cluster->max_freq = policy->max;
+ cluster->cur_freq = policy->cur;
+
+ if (!cluster->freq_init_done) {
+ mutex_lock(&cluster_lock);
+ for_each_cpu(j, &cluster->cpus)
+ cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
+ policy->related_cpus);
+ cluster->max_possible_freq = policy->cpuinfo.max_freq;
+ cluster->max_possible_capacity =
+ compute_max_possible_capacity(cluster);
+ cluster->freq_init_done = true;
+
+ sort_clusters();
+ update_all_clusters_stats();
+ mutex_unlock(&cluster_lock);
+ continue;
+ }
+
+ update_capacity += (orig_max_freq != cluster->max_freq);
+ }
+
+ if (update_capacity)
+ update_cpu_cluster_capacity(policy->related_cpus);
+
+ return 0;
+}
+
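+/*
+ * cpufreq transition notifier: on a frequency change, fold the time spent at
+ * the outgoing frequency into the window stats of every CPU in the cluster
+ * and record the new current frequency.
+ */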
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+ unsigned int cpu = freq->cpu, new_freq = freq->new;
+ unsigned long flags;
+ struct sched_cluster *cluster;
+ struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
+ int i, j;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ BUG_ON(!new_freq);
+
+ if (cpu_cur_freq(cpu) == new_freq)
+ return 0;
+
+ for_each_cpu(i, &policy_cpus) {
+ cluster = cpu_rq(i)->cluster;
+
+ for_each_cpu(j, &cluster->cpus) {
+ struct rq *rq = cpu_rq(j);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE,
+ sched_ktime_clock(), 0);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+
+ cluster->cur_freq = new_freq;
+ cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
+ }
+
+ return 0;
+}
+
+static int pwr_stats_ready_notifier(struct notifier_block *nb,
+ unsigned long cpu, void *data)
+{
+ cpumask_t mask = CPU_MASK_NONE;
+
+ cpumask_set_cpu(cpu, &mask);
+ sched_update_freq_max_load(&mask);
+
+ mutex_lock(&cluster_lock);
+ sort_clusters();
+ mutex_unlock(&cluster_lock);
+
+ return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+ .notifier_call = cpufreq_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+ .notifier_call = cpufreq_notifier_trans
+};
+
+static struct notifier_block notifier_pwr_stats_ready = {
+ .notifier_call = pwr_stats_ready_notifier
+};
+
+int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
+static int register_sched_callback(void)
+{
+ int ret;
+
+ if (!sched_enable_hmp)
+ return 0;
+
+ ret = cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (!ret)
+ ret = cpufreq_register_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);
+
+ return 0;
+}
+
+/*
+ * cpufreq callbacks can be registered at core_initcall time or later.
+ * Any registration done before that is "forgotten" by cpufreq. See the
+ * initialization of the variable init_cpufreq_transition_notifier_list_called
+ * for further information.
+ */
+core_initcall(register_sched_callback);
+
+int update_preferred_cluster(struct related_thread_group *grp,
+ struct task_struct *p, u32 old_load)
+{
+ u32 new_load = task_load(p);
+
+ if (!grp)
+ return 0;
+
+ /*
+ * Update if task's load has changed significantly or a complete window
+ * has passed since we last updated preference
+ */
+ if (abs(new_load - old_load) > sched_ravg_window / 4 ||
+ sched_ktime_clock() - grp->last_update > sched_ravg_window)
+ return 1;
+
+ return 0;
+}
+
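+/*
+ * Under sched boost, scan up to ten runnable CFS tasks on this rq and flag
+ * one that has been runnable since its last wakeup for at least
+ * EARLY_DETECTION_DURATION as the rq's early-detection task.
+ */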
+bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ struct task_struct *p;
+ int loop_max = 10;
+
+ if (!sched_boost() || !rq->cfs.h_nr_running)
+ return 0;
+
+ rq->ed_task = NULL;
+ list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+ if (!loop_max)
+ break;
+
+ if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+ rq->ed_task = p;
+ return 1;
+ }
+
+ loop_max--;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ struct task_group *tg = css_tg(css);
+
+ return tg->upmigrate_discouraged;
+}
+
+int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 upmigrate_discourage)
+{
+ struct task_group *tg = css_tg(css);
+ int discourage = upmigrate_discourage > 0;
+
+ if (tg->upmigrate_discouraged == discourage)
+ return 0;
+
+ /*
+ * Revisit big-task classification for tasks of this cgroup. It would
+ * have been efficient to walk tasks of just this cgroup in running
+ * state, but we don't have easy means to do that. Walk all tasks in
+ * running state on all cpus instead and re-visit their big task
+ * classification.
+ */
+ get_online_cpus();
+ pre_big_task_count_change(cpu_online_mask);
+
+ tg->upmigrate_discouraged = discourage;
+
+ post_big_task_count_change(cpu_online_mask);
+ put_online_cpus();
+
+ return 0;
+}
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 4a2ef5a02fd3..2489140a7c51 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -219,6 +219,7 @@ static void cpu_idle_loop(void)
*/
__current_set_polling();
+ quiet_vmstat();
tick_nohz_idle_enter();
while (!need_resched()) {
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index c4ae0f1fdf9b..36c6634236fb 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -79,6 +79,26 @@ static void update_curr_idle(struct rq *rq)
{
}
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void
+dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void
+fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+}
+
+#endif
+
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
@@ -107,4 +127,9 @@ const struct sched_class idle_sched_class = {
.prio_changed = prio_changed_idle,
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_idle,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_idle,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_idle,
+#endif
};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8ec86abe0ea1..cfec881491ef 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
+#include <trace/events/sched.h>
int sched_rr_timeslice = RR_TIMESLICE;
@@ -889,6 +890,51 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
return rt_task_of(rt_se)->prio;
}
+static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
+{
+ struct rt_prio_array *array = &rt_rq->active;
+ struct sched_rt_entity *rt_se;
+ char buf[500];
+ char *pos = buf;
+ char *end = buf + sizeof(buf);
+ int idx;
+
+ pos += snprintf(pos, sizeof(buf),
+ "sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+ rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+
+ if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
+ goto out;
+
+ pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+ idx = sched_find_first_bit(array->bitmap);
+ while (idx < MAX_RT_PRIO) {
+ list_for_each_entry(rt_se, array->queue + idx, run_list) {
+ struct task_struct *p;
+
+ if (!rt_entity_is_task(rt_se))
+ continue;
+
+ p = rt_task_of(rt_se);
+ if (pos < end)
+ pos += snprintf(pos, end - pos, "\t%s (%d)\n",
+ p->comm, p->pid);
+ }
+ idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
+ }
+out:
+#ifdef CONFIG_PANIC_ON_RT_THROTTLING
+ /*
+ * Use pr_err() in the BUG() case: a deferred printk would not get
+ * flushed before the crash, and since we are about to BUG() anyway,
+ * a printk deadlock is not a concern.
+ */
+ pr_err("%s", buf);
+ BUG();
+#else
+ printk_deferred("%s", buf);
+#endif
+}
+
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
@@ -912,8 +958,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
* but accrue some time due to boosting.
*/
if (likely(rt_b->rt_runtime)) {
+ static bool once = false;
+
rt_rq->rt_throttled = 1;
- printk_deferred_once("sched: RT throttling activated\n");
+
+ if (!once) {
+ once = true;
+ dump_throttled_rt_tasks(rt_rq);
+ }
} else {
/*
* In case we did anyway, make it go away,
@@ -1130,6 +1182,41 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
#endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
@@ -1261,6 +1348,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+ inc_hmp_sched_stats_rt(rq, p);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
@@ -1272,6 +1360,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
update_curr_rt(rq);
dequeue_rt_entity(rt_se);
+ dec_hmp_sched_stats_rt(rq, p);
dequeue_pushable_task(rq, p);
}
@@ -1314,11 +1403,28 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
static int
+select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+ int target;
+
+ rcu_read_lock();
+ target = find_lowest_rq(p);
+ if (target != -1)
+ cpu = target;
+ rcu_read_unlock();
+
+ return cpu;
+}
+
+static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
+ if (sched_enable_hmp)
+ return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
@@ -1556,6 +1662,74 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
+#ifdef CONFIG_SCHED_HMP
+
+static int find_lowest_rq_hmp(struct task_struct *task)
+{
+ struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
+ struct cpumask candidate_mask = CPU_MASK_NONE;
+ struct sched_cluster *cluster;
+ int best_cpu = -1;
+ int prev_cpu = task_cpu(task);
+ u64 cpu_load, min_load = ULLONG_MAX;
+ int i;
+ int restrict_cluster = sched_boost() ? 0 :
+ sysctl_sched_restrict_cluster_spill;
+
+ /* Make sure the mask is initialized first */
+ if (unlikely(!lowest_mask))
+ return best_cpu;
+
+ if (task->nr_cpus_allowed == 1)
+ return best_cpu; /* No other targets possible */
+
+ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+ return best_cpu; /* No targets found */
+
+ /*
+ * At this point we have built a mask of cpus representing the
+ * lowest priority tasks in the system. Now we want to elect
+ * the best one based on our affinity and topology.
+ */
+
+ for_each_sched_cluster(cluster) {
+ cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
+
+ if (cpumask_empty(&candidate_mask))
+ continue;
+
+ for_each_cpu(i, &candidate_mask) {
+ if (sched_cpu_high_irqload(i))
+ continue;
+
+ cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
+ if (!restrict_cluster)
+ cpu_load = scale_load_to_cpu(cpu_load, i);
+
+ if (cpu_load < min_load ||
+ (cpu_load == min_load &&
+ (i == prev_cpu || (best_cpu != prev_cpu &&
+ cpus_share_cache(prev_cpu, i))))) {
+ min_load = cpu_load;
+ best_cpu = i;
+ }
+ }
+ if (restrict_cluster && best_cpu != -1)
+ break;
+ }
+
+ return best_cpu;
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static int find_lowest_rq_hmp(struct task_struct *task)
+{
+ return -1;
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
@@ -1563,6 +1737,9 @@ static int find_lowest_rq(struct task_struct *task)
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
+ if (sched_enable_hmp)
+ return find_lowest_rq_hmp(task);
+
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
@@ -1780,7 +1957,9 @@ retry:
}
deactivate_task(rq, next_task, 0);
+ next_task->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(next_task, lowest_rq->cpu);
+ next_task->on_rq = TASK_ON_RQ_QUEUED;
activate_task(lowest_rq, next_task, 0);
ret = 1;
@@ -2034,7 +2213,9 @@ static void pull_rt_task(struct rq *this_rq)
resched = true;
deactivate_task(src_rq, p, 0);
+ p->on_rq = TASK_ON_RQ_MIGRATING;
set_task_cpu(p, this_cpu);
+ p->on_rq = TASK_ON_RQ_QUEUED;
activate_task(this_rq, p, 0);
/*
* We continue with the search, just in
@@ -2116,6 +2297,7 @@ void __init init_sched_rt_class(void)
GFP_KERNEL, cpu_to_node(i));
}
}
+
#endif /* CONFIG_SMP */
/*
@@ -2290,6 +2472,11 @@ const struct sched_class rt_sched_class = {
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_rt,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_rt,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_rt,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0517abd7dd73..ec7721112b05 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -27,6 +27,7 @@ extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;
extern void calc_global_load_tick(struct rq *this_rq);
+
extern long calc_load_fold_active(struct rq *this_rq);
#ifdef CONFIG_SMP
@@ -240,6 +241,10 @@ struct cfs_bandwidth {
struct task_group {
struct cgroup_subsys_state css;
+#ifdef CONFIG_SCHED_HMP
+ bool upmigrate_discouraged;
+#endif
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct sched_entity **se;
@@ -337,12 +342,82 @@ extern void sched_move_task(struct task_struct *tsk);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
#endif
+extern struct task_group *css_tg(struct cgroup_subsys_state *css);
#else /* CONFIG_CGROUP_SCHED */
struct cfs_bandwidth { };
#endif /* CONFIG_CGROUP_SCHED */
+#ifdef CONFIG_SCHED_HMP
+
+struct hmp_sched_stats {
+ int nr_big_tasks;
+ u64 cumulative_runnable_avg;
+ u64 pred_demands_sum;
+};
+
+struct sched_cluster {
+ struct list_head list;
+ struct cpumask cpus;
+ int id;
+ int max_power_cost;
+ int min_power_cost;
+ int max_possible_capacity;
+ int capacity;
+ int efficiency; /* Differentiate cpus with different IPC capability */
+ int load_scale_factor;
+ unsigned int exec_scale_factor;
+ /*
+ * max_freq = user maximum
+ * max_mitigated_freq = thermal defined maximum
+ * max_possible_freq = maximum supported by hardware
+ */
+ unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
+ unsigned int max_possible_freq;
+ bool freq_init_done;
+ int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
+ unsigned int static_cluster_pwr_cost;
+ int notifier_sent;
+};
+
+extern unsigned long all_cluster_ids[];
+
+static inline int cluster_first_cpu(struct sched_cluster *cluster)
+{
+ return cpumask_first(&cluster->cpus);
+}
+
+struct related_thread_group {
+ int id;
+ raw_spinlock_t lock;
+ struct list_head tasks;
+ struct list_head list;
+ struct sched_cluster *preferred_cluster;
+ struct rcu_head rcu;
+ u64 last_update;
+ struct group_cpu_time __percpu *cpu_time; /* one per cluster */
+};
+
+struct migration_sum_data {
+ struct rq *src_rq, *dst_rq;
+ struct group_cpu_time *src_cpu_time, *dst_cpu_time;
+};
+
+extern struct list_head cluster_head;
+extern int num_clusters;
+extern struct sched_cluster *sched_cluster[NR_CPUS];
+
+struct cpu_cycle {
+ u64 cycles;
+ u64 time;
+};
+
+#define for_each_sched_cluster(cluster) \
+ list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+#endif /* CONFIG_SCHED_HMP */
+
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct load_weight load;
@@ -411,6 +486,11 @@ struct cfs_rq {
struct task_group *tg; /* group that "owns" this runqueue */
#ifdef CONFIG_CFS_BANDWIDTH
+
+#ifdef CONFIG_SCHED_HMP
+ struct hmp_sched_stats hmp_stats;
+#endif
+
int runtime_enabled;
u64 runtime_expires;
s64 runtime_remaining;
@@ -624,6 +704,7 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
+ struct task_struct *push_task;
struct cpu_stop_work active_balance_work;
/* cpu of this runqueue: */
int cpu;
@@ -640,6 +721,29 @@ struct rq {
u64 max_idle_balance_cost;
#endif
+#ifdef CONFIG_SCHED_HMP
+ struct sched_cluster *cluster;
+ struct cpumask freq_domain_cpumask;
+ struct hmp_sched_stats hmp_stats;
+
+ int cstate, wakeup_latency, wakeup_energy;
+ u64 window_start;
+ unsigned long hmp_flags;
+
+ u64 cur_irqload;
+ u64 avg_irqload;
+ u64 irqload_ts;
+ unsigned int static_cpu_pwr_cost;
+ struct task_struct *ed_task;
+ struct cpu_cycle cc;
+ u64 old_busy_time, old_busy_time_group;
+ u64 old_estimated_time;
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+#endif
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
#endif
@@ -905,6 +1009,598 @@ static inline void sched_ttwu_pending(void) { }
#include "stats.h"
#include "auto_group.h"
+#ifdef CONFIG_SCHED_HMP
+
+#define WINDOW_STATS_RECENT 0
+#define WINDOW_STATS_MAX 1
+#define WINDOW_STATS_MAX_RECENT_AVG 2
+#define WINDOW_STATS_AVG 3
+#define WINDOW_STATS_INVALID_POLICY 4
+
+#define MAJOR_TASK_PCT 85
+#define SCHED_UPMIGRATE_MIN_NICE 15
+#define EXITING_TASK_MARKER 0xdeaddead
+
+#define UP_MIGRATION 1
+#define DOWN_MIGRATION 2
+#define IRQLOAD_MIGRATION 3
+
+extern struct mutex policy_mutex;
+extern unsigned int sched_ravg_window;
+extern unsigned int sched_disable_window_stats;
+extern unsigned int sched_enable_hmp;
+extern unsigned int max_possible_freq;
+extern unsigned int min_max_freq;
+extern unsigned int pct_task_load(struct task_struct *p);
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_capacity;
+extern unsigned int min_capacity;
+extern unsigned int max_load_scale_factor;
+extern unsigned int max_possible_capacity;
+extern unsigned int min_max_possible_capacity;
+extern unsigned int sched_upmigrate;
+extern unsigned int sched_downmigrate;
+extern unsigned int sched_init_task_load_windows;
+extern unsigned int up_down_migrate_scale_factor;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sched_pred_alert_load;
+extern unsigned int sched_major_task_runtime;
+extern struct sched_cluster init_cluster;
+extern unsigned int __read_mostly sched_short_sleep_task_threshold;
+extern unsigned int __read_mostly sched_long_cpu_selection_threshold;
+extern unsigned int __read_mostly sched_big_waker_task_load;
+extern unsigned int __read_mostly sched_small_wakee_task_load;
+extern unsigned int __read_mostly sched_spill_load;
+extern unsigned int __read_mostly sched_upmigrate;
+extern unsigned int __read_mostly sched_downmigrate;
+extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
+
+extern void init_new_task_load(struct task_struct *p);
+extern u64 sched_ktime_clock(void);
+extern int got_boost_kick(void);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void clear_boost_kick(int cpu);
+extern void clear_hmp_request(int cpu);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+extern void migrate_sync_cpu(int cpu);
+extern void update_cluster_topology(void);
+extern void set_task_last_wake(struct task_struct *p, u64 wallclock);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+extern void init_clusters(void);
+extern int __init set_sched_enable_hmp(char *str);
+extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
+extern unsigned int max_task_load(void);
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock);
+extern unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);
+extern void update_up_down_migrate(void);
+extern int update_preferred_cluster(struct related_thread_group *grp,
+ struct task_struct *p, u32 old_load);
+extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void add_new_task_to_grp(struct task_struct *new);
+
+enum sched_boost_type {
+ SCHED_BOOST_NONE,
+ SCHED_BOOST_ON_BIG,
+ SCHED_BOOST_ON_ALL,
+};
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+ return cpu_rq(cpu)->cluster;
+}
+
+static inline int cpu_capacity(int cpu)
+{
+ return cpu_rq(cpu)->cluster->capacity;
+}
+
+static inline int cpu_max_possible_capacity(int cpu)
+{
+ return cpu_rq(cpu)->cluster->max_possible_capacity;
+}
+
+static inline int cpu_load_scale_factor(int cpu)
+{
+ return cpu_rq(cpu)->cluster->load_scale_factor;
+}
+
+static inline int cpu_efficiency(int cpu)
+{
+ return cpu_rq(cpu)->cluster->efficiency;
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+ return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline unsigned int cpu_min_freq(int cpu)
+{
+ return cpu_rq(cpu)->cluster->min_freq;
+}
+
+static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
+{
+ /*
+ * The governor and the thermal driver don't know about each other's
+ * mitigation votes, so the cluster stores both limits and the minimum
+ * of the two is the current cluster fmax.
+ */
+ return min(cluster->max_mitigated_freq, cluster->max_freq);
+}
+
+static inline unsigned int cpu_max_freq(int cpu)
+{
+ return cluster_max_freq(cpu_rq(cpu)->cluster);
+}
+
+static inline unsigned int cpu_max_possible_freq(int cpu)
+{
+ return cpu_rq(cpu)->cluster->max_possible_freq;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+ return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+static inline int cpu_max_power_cost(int cpu)
+{
+ return cpu_rq(cpu)->cluster->max_power_cost;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+{
+ return div64_u64(cycles, period);
+}
+
+static inline bool hmp_capable(void)
+{
+ return max_possible_capacity != min_max_possible_capacity;
+}
+
+/*
+ * 'task_load' is expressed relative to the "best" cpu at its maximum
+ * frequency. Scale it to the given cpu, accounting for how much slower
+ * that cpu is than the "best" cpu.
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+ u64 lsf = cpu_load_scale_factor(cpu);
+
+ if (lsf != 1024) {
+ task_load *= lsf;
+ task_load /= 1024;
+ }
+
+ return task_load;
+}
+
+static inline unsigned int task_load(struct task_struct *p)
+{
+ return p->ravg.demand;
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+ u32 task_load;
+
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+ stats->cumulative_runnable_avg += task_load;
+ stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+ u32 task_load;
+
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+ stats->cumulative_runnable_avg -= task_load;
+
+ BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+ stats->pred_demands_sum -= p->ravg.pred_demand;
+ BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 task_load_delta,
+ s64 pred_demand_delta)
+{
+ if (!sched_enable_hmp || sched_disable_window_stats)
+ return;
+
+ stats->cumulative_runnable_avg += task_load_delta;
+ BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+ stats->pred_demands_sum += pred_demand_delta;
+ BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+#define pct_to_real(tunable) \
+ (div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
+#define real_to_pct(tunable) \
+ (div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
+
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ s64 delta;
+
+ delta = get_jiffies_64() - rq->irqload_ts;
+ /*
+ * The current context can be preempted by an irq, and rq->irqload_ts
+ * can be updated from irq context, so delta can be negative. That is
+ * fine: a negative delta simply means an irq occurred very recently,
+ * so returning the average irqload is still correct.
+ */
+
+ if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+ return rq->avg_irqload;
+ else
+ return 0;
+}
+
+static inline int sched_cpu_high_irqload(int cpu)
+{
+ return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+ return !!(rcu_access_pointer(p->grp) != NULL);
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+ return rcu_dereference(p->grp);
+}
+
+#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
+
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+
+extern void notify_migration(int src_cpu, int dest_cpu,
+ bool src_cpu_dead, struct task_struct *p);
+
+struct group_cpu_time {
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+ u64 window_start;
+};
+
+/* Are the frequencies of the two cpus synchronized with each other? */
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+ struct rq *rq = cpu_rq(src_cpu);
+
+ if (src_cpu == dst_cpu)
+ return 1;
+
+ return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
+}
+
+#define BOOST_KICK 0
+#define CPU_RESERVED 1
+
+static inline int is_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ /* Name boost_flags as hmp_flags? */
+ return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline u64 cpu_cravg_sync(int cpu, int sync)
+{
+ struct rq *rq = cpu_rq(cpu);
+ u64 load;
+
+ load = rq->hmp_stats.cumulative_runnable_avg;
+
+ /*
+ * If load is being checked in a sync wakeup environment,
+ * we may want to discount the load of the currently running
+ * task.
+ */
+ if (sync && cpu == smp_processor_id()) {
+ if (load > rq->curr->ravg.demand)
+ load -= rq->curr->ravg.demand;
+ else
+ load = 0;
+ }
+
+ return load;
+}
+
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
+extern void boost_kick(int cpu);
+extern int sched_boost(void);
+extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+ enum sched_boost_type boost_type);
+extern enum sched_boost_type sched_boost_type(void);
+extern int task_will_fit(struct task_struct *p, int cpu);
+extern int group_will_fit(struct sched_cluster *cluster,
+ struct related_thread_group *grp, u64 demand);
+extern u64 cpu_load(int cpu);
+extern u64 cpu_load_sync(int cpu, int sync);
+extern int preferred_cluster(struct sched_cluster *cluster,
+ struct task_struct *p);
+extern void inc_nr_big_task(struct hmp_sched_stats *stats,
+ struct task_struct *p);
+extern void dec_nr_big_task(struct hmp_sched_stats *stats,
+ struct task_struct *p);
+extern void inc_rq_hmp_stats(struct rq *rq,
+ struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+ struct task_struct *p, int change_cra);
+extern int is_big_task(struct task_struct *p);
+extern int upmigrate_discouraged(struct task_struct *p);
+extern struct sched_cluster *rq_cluster(struct rq *rq);
+extern int nr_big_tasks(struct rq *rq);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+ struct task_struct *p, s64 delta);
+extern void reset_task_stats(struct task_struct *p);
+extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
+extern void _inc_hmp_sched_stats_fair(struct rq *rq,
+ struct task_struct *p, int change_cra);
+extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft);
+extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 upmigrate_discourage);
+
+#else /* CONFIG_SCHED_HMP */
+
+struct hmp_sched_stats;
+struct related_thread_group;
+struct sched_cluster;
+
+static inline int got_boost_kick(void)
+{
+ return 0;
+}
+
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+ return 0;
+}
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void clear_boost_kick(int cpu) { }
+static inline void clear_hmp_request(int cpu) { }
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline void migrate_sync_cpu(int cpu) { }
+static inline void update_cluster_topology(void) { }
+static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { }
+static inline void set_task_last_switch_out(struct task_struct *p,
+ u64 wallclock) { }
+
+static inline int task_will_fit(struct task_struct *p, int cpu)
+{
+ return 1;
+}
+
+static inline int select_best_cpu(struct task_struct *p, int target,
+ int reason, int sync)
+{
+ return 0;
+}
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+
+static inline int sched_boost(void)
+{
+ return 0;
+}
+
+static inline int is_big_task(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int nr_big_tasks(struct rq *rq)
+{
+ return 0;
+}
+
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+ return 0;
+}
+
+static inline int is_task_migration_throttled(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+ return 0;
+}
+
+static inline void
+inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }
+
+static inline int
+preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+ return 1;
+}
+
+static inline struct sched_cluster *rq_cluster(struct rq *rq)
+{
+ return NULL;
+}
+
+static inline void init_new_task_load(struct task_struct *p) { }
+
+static inline u64 scale_load_to_cpu(u64 load, int cpu)
+{
+ return load;
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+ return 0;
+}
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline int cpu_capacity(int cpu)
+{
+ return SCHED_LOAD_SCALE;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+ struct task_struct *p)
+{
+}
+
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+ u64 delta, u64 wallclock)
+{
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock)
+{
+}
+
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline void set_preferred_cluster(struct related_thread_group *grp) { }
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+ return false;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+ return NULL;
+}
+
+static inline u32 task_load(struct task_struct *p) { return 0; }
+
+static inline int update_preferred_cluster(struct related_thread_group *grp,
+ struct task_struct *p, u32 old_load)
+{
+ return 0;
+}
+
+static inline void add_new_task_to_grp(struct task_struct *new) {}
+
+#define sched_enable_hmp 0
+#define sched_freq_legacy_mode 1
+#define sched_migration_fixup 0
+#define PRED_DEMAND_DELTA (0)
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+ bool src_cpu_dead, struct task_struct *p) { }
+
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+ return 1;
+}
+
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
+static inline void set_hmp_defaults(void) { }
+
+static inline void clear_reserved(int cpu) { }
+
+#define trace_sched_cpu_load(...)
+#define trace_sched_cpu_load_lb(...)
+#define trace_sched_cpu_load_cgroup(...)
+#define trace_sched_cpu_load_wakeup(...)
+
+#endif /* CONFIG_SCHED_HMP */
+
+/*
+ * Returns the rq capacity of any rq in a group. This does not play
+ * well with groups where rq capacity can change independently.
+ */
+#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))
+
#ifdef CONFIG_CGROUP_SCHED
/*
@@ -950,7 +1646,6 @@ static inline struct task_group *task_group(struct task_struct *p)
{
return NULL;
}
-
#endif /* CONFIG_CGROUP_SCHED */
static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
@@ -1100,6 +1795,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
+#define WF_NO_NOTIFIER 0x08 /* do not notify governor */
/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1228,6 +1924,12 @@ struct sched_class {
#ifdef CONFIG_FAIR_GROUP_SCHED
void (*task_move_group) (struct task_struct *p);
#endif
+#ifdef CONFIG_SCHED_HMP
+ void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+ void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+ void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand);
+#endif
};
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -1288,7 +1990,9 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
}
#endif
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
extern void sysrq_sched_debug_show(void);
+#endif
extern void sched_init_granularity(void);
extern void update_max_interval(void);
@@ -1314,6 +2018,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
{
unsigned prev_nr = rq->nr_running;
+ sched_update_nr_prod(cpu_of(rq), count, true);
rq->nr_running = prev_nr + count;
if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1340,6 +2045,7 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
static inline void sub_nr_running(struct rq *rq, unsigned count)
{
+ sched_update_nr_prod(cpu_of(rq), count, false);
rq->nr_running -= count;
}
@@ -1719,6 +2425,9 @@ enum rq_nohz_flag_bits {
NOHZ_BALANCE_KICK,
};
+#define NOHZ_KICK_ANY 0
+#define NOHZ_KICK_RESTRICT 1
+
#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
#endif
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
new file mode 100644
index 000000000000..c70e0466c36c
--- /dev/null
+++ b/kernel/sched/sched_avg.c
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Scheduler hook for average runqueue determination
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+
+#include "sched.h"
+#include <trace/events/sched.h>
+
+static DEFINE_PER_CPU(u64, nr_prod_sum);
+static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr_big_prod_sum);
+static DEFINE_PER_CPU(u64, nr);
+
+static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
+static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
+static s64 last_get_time;
+
+/**
+ * sched_get_nr_running_avg
+ * @return: Average nr_running, iowait and nr_big_tasks values since the
+ *		last poll. Each average is returned multiplied by 100, giving
+ *		up to two decimal places of accuracy.
+ *
+ * Obtains the average nr_running value since the last poll.
+ * This function must not be called concurrently with itself.
+ */
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+{
+ int cpu;
+ u64 curr_time = sched_clock();
+ u64 diff = curr_time - last_get_time;
+ u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;
+
+ *avg = 0;
+ *iowait_avg = 0;
+ *big_avg = 0;
+
+ if (!diff)
+ return;
+
+ /* read and reset nr_running counts */
+ for_each_possible_cpu(cpu) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+ curr_time = sched_clock();
+ tmp_avg += per_cpu(nr_prod_sum, cpu);
+ tmp_avg += per_cpu(nr, cpu) *
+ (curr_time - per_cpu(last_time, cpu));
+
+ tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
+ tmp_big_avg += nr_eligible_big_tasks(cpu) *
+ (curr_time - per_cpu(last_time, cpu));
+
+ tmp_iowait += per_cpu(iowait_prod_sum, cpu);
+ tmp_iowait += nr_iowait_cpu(cpu) *
+ (curr_time - per_cpu(last_time, cpu));
+
+ per_cpu(last_time, cpu) = curr_time;
+
+ per_cpu(nr_prod_sum, cpu) = 0;
+ per_cpu(nr_big_prod_sum, cpu) = 0;
+ per_cpu(iowait_prod_sum, cpu) = 0;
+
+ spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+ }
+
+ diff = curr_time - last_get_time;
+ last_get_time = curr_time;
+
+ *avg = (int)div64_u64(tmp_avg * 100, diff);
+ *big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
+ *iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+
+ trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+
+ BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
+ pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
+ __func__, *avg, *big_avg, *iowait_avg);
+}
+EXPORT_SYMBOL(sched_get_nr_running_avg);
+
+/**
+ * sched_update_nr_prod
+ * @cpu: The CPU id whose nr_running count is being updated.
+ * @delta: Adjust nr by 'delta' amount
+ * @inc: Whether we are increasing or decreasing the count
+ * @return: N/A
+ *
+ * Update average with latest nr_running value for CPU
+ */
+void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+ int diff;
+ s64 curr_time;
+ unsigned long flags, nr_running;
+
+ spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+ nr_running = per_cpu(nr, cpu);
+ curr_time = sched_clock();
+ diff = curr_time - per_cpu(last_time, cpu);
+ per_cpu(last_time, cpu) = curr_time;
+ per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);
+
+ BUG_ON((s64)per_cpu(nr, cpu) < 0);
+
+ per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+ per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
+ per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
+ spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+}
+EXPORT_SYMBOL(sched_update_nr_prod);
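A minimal usage sketch, assuming a hotplug or cpufreq governor polls these averages periodically; the helper name and threshold values below are illustrative assumptions, not part of this patch. Only sched_get_nr_running_avg() itself is real, and it reports each average multiplied by 100.

static void example_poll_rq_avg(void)
{
	int avg, iowait_avg, big_avg;

	/* Averages are returned multiplied by 100, e.g. 250 == 2.5 tasks. */
	sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);

	if (avg > 400)		/* assumed threshold: > 4 runnable tasks on average */
		pr_info("consider onlining another CPU\n");
	else if (avg < 100)	/* assumed threshold: < 1 runnable task on average */
		pr_info("consider offlining a CPU\n");
}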
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index cbc67da10954..134da1cc8fce 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -17,6 +17,41 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+ inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+ dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
+ u32 new_task_load, u32 new_pred_demand)
+{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+ s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+ fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+ pred_demand_delta);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
@@ -42,12 +77,14 @@ static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
add_nr_running(rq, 1);
+ inc_hmp_sched_stats_stop(rq, p);
}
static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
sub_nr_running(rq, 1);
+ dec_hmp_sched_stats_stop(rq, p);
}
static void yield_task_stop(struct rq *rq)
@@ -134,4 +171,9 @@ const struct sched_class stop_sched_class = {
.prio_changed = prio_changed_stop,
.switched_to = switched_to_stop,
.update_curr = update_curr_stop,
+#ifdef CONFIG_SCHED_HMP
+ .inc_hmp_sched_stats = inc_hmp_sched_stats_stop,
+ .dec_hmp_sched_stats = dec_hmp_sched_stats_stop,
+ .fixup_hmp_sched_stats = fixup_hmp_sched_stats_stop,
+#endif
};
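For context, a hedged sketch of how the per-class HMP hooks registered above would be dispatched by the HMP core (which lives in the hmp.c file added elsewhere in this series); the wrapper name is illustrative:

static inline void example_inc_hmp_stats(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SCHED_HMP
	/* Dispatch to the scheduling class that owns the task. */
	if (p->sched_class->inc_hmp_sched_stats)
		p->sched_class->inc_hmp_sched_stats(rq, p);
#endif
}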
diff --git a/kernel/smp.c b/kernel/smp.c
index d903c02223af..abdc48cd79a3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -32,6 +32,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
static void flush_smp_call_function_queue(bool warn_cpu_offline);
+/* CPU mask indicating which CPUs to bring online during smp_init() */
+static bool have_boot_cpu_mask;
+static cpumask_var_t boot_cpu_mask;
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -548,6 +551,19 @@ static int __init maxcpus(char *str)
early_param("maxcpus", maxcpus);
+static int __init boot_cpus(char *str)
+{
+ alloc_bootmem_cpumask_var(&boot_cpu_mask);
+ if (cpulist_parse(str, boot_cpu_mask) < 0) {
+ pr_warn("SMP: Incorrect boot_cpus cpumask\n");
+ return -EINVAL;
+ }
+ have_boot_cpu_mask = true;
+ return 0;
+}
+
+early_param("boot_cpus", boot_cpus);
+
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
@@ -563,6 +579,21 @@ void __weak smp_announce(void)
printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}
+/* Should the given CPU be booted during smp_init() ? */
+static inline bool boot_cpu(int cpu)
+{
+ if (!have_boot_cpu_mask)
+ return true;
+
+ return cpumask_test_cpu(cpu, boot_cpu_mask);
+}
+
+static inline void free_boot_cpu_mask(void)
+{
+ if (have_boot_cpu_mask) /* Allocated from boot_cpus() */
+ free_bootmem_cpumask_var(boot_cpu_mask);
+}
+
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
@@ -574,10 +605,12 @@ void __init smp_init(void)
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
break;
- if (!cpu_online(cpu))
+ if (!cpu_online(cpu) && boot_cpu(cpu))
cpu_up(cpu);
}
+ free_boot_cpu_mask();
+
/* Any cleanup work */
smp_announce();
smp_cpus_done(setup_max_cpus);
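With this change, passing for example boot_cpus=0-3 on the kernel command line (the range is illustrative) makes smp_init() online only CPUs 0-3 at boot; the remaining present CPUs can still be brought up later through the normal CPU hotplug sysfs interface. cpulist_parse() accepts the usual cpulist syntax, so forms such as boot_cpus=0,2-3 also work.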
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d264f59bff56..6949476a118f 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -13,6 +13,7 @@
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>
+#include <linux/kmemleak.h>
#include "smpboot.h"
@@ -177,6 +178,8 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
if (!td)
return -ENOMEM;
+
+ kmemleak_not_leak(td);
td->cpu = cpu;
td->ht = ht;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35bfe0c1360b..07fef40d1274 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -285,6 +285,170 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "sched_wake_to_idle",
+ .data = &sysctl_sched_wake_to_idle,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_SCHED_HMP
+ {
+ .procname = "sched_freq_inc_notify",
+ .data = &sysctl_sched_freq_inc_notify,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "sched_freq_dec_notify",
+ .data = &sysctl_sched_freq_dec_notify,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "sched_cpu_high_irqload",
+ .data = &sysctl_sched_cpu_high_irqload,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_ravg_hist_size",
+ .data = &sysctl_sched_ravg_hist_size,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
+ {
+ .procname = "sched_window_stats_policy",
+ .data = &sysctl_sched_window_stats_policy,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
+ {
+ .procname = "sched_spill_load",
+ .data = &sysctl_sched_spill_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_spill_nr_run",
+ .data = &sysctl_sched_spill_nr_run,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "sched_upmigrate",
+ .data = &sysctl_sched_upmigrate_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_downmigrate",
+ .data = &sysctl_sched_downmigrate_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_init_task_load",
+ .data = &sysctl_sched_init_task_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_select_prev_cpu_us",
+ .data = &sysctl_sched_select_prev_cpu_us,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_enable_colocation",
+ .data = &sysctl_sched_enable_colocation,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "sched_restrict_cluster_spill",
+ .data = &sysctl_sched_restrict_cluster_spill,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "sched_small_wakee_task_load",
+ .data = &sysctl_sched_small_wakee_task_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_big_waker_task_load",
+ .data = &sysctl_sched_big_waker_task_load_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_enable_thread_grouping",
+ .data = &sysctl_sched_enable_thread_grouping,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_new_task_windows",
+ .data = &sysctl_sched_new_task_windows,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
+ {
+ .procname = "sched_pred_alert_freq",
+ .data = &sysctl_sched_pred_alert_freq,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "sched_freq_aggregate",
+ .data = &sysctl_sched_freq_aggregate,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
+ {
+ .procname = "sched_freq_aggregate_threshold",
+ .data = &sysctl_sched_freq_aggregate_threshold_pct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_hmp_proc_update_handler,
+ },
+ {
+ .procname = "sched_boost",
+ .data = &sysctl_sched_boost,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_boost_handler,
+ },
+#endif /* CONFIG_SCHED_HMP */
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",
@@ -1173,6 +1337,27 @@ static struct ctl_table kern_table[] = {
.extra2 = &one,
},
#endif
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ {
+ .procname = "boot_reason",
+ .data = &boot_reason,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+
+ {
+ .procname = "cold_boot",
+ .data = &cold_boot,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+/*
+ * NOTE: do not add new entries to this table unless you have read
+ * Documentation/sysctl/ctl_unnumbered.txt
+ */
{ }
};
@@ -1599,6 +1784,22 @@ static struct ctl_table vm_table[] = {
.extra2 = (void *)&mmap_rnd_compat_bits_max,
},
#endif
+#ifdef CONFIG_SWAP
+ {
+ .procname = "swap_ratio",
+ .data = &sysctl_swap_ratio,
+ .maxlen = sizeof(sysctl_swap_ratio),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+ {
+ .procname = "swap_ratio_enable",
+ .data = &sysctl_swap_ratio_enable,
+ .maxlen = sizeof(sysctl_swap_ratio_enable),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ },
+#endif
{ }
};
@@ -2046,15 +2247,7 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- if (*negp) {
- if (*lvalp > (unsigned long) INT_MAX + 1)
- return -EINVAL;
- *valp = -*lvalp;
- } else {
- if (*lvalp > (unsigned long) INT_MAX)
- return -EINVAL;
- *valp = *lvalp;
- }
+ *valp = *negp ? -*lvalp : *lvalp;
} else {
int val = *valp;
if (val < 0) {
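A hedged user-space sketch of how the new CONFIG_SCHED_HMP tunables are exercised through /proc/sys/kernel; the values written below are purely illustrative, not recommendations:

#include <stdio.h>

static void write_tunable(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	/* Illustrative values only. */
	write_tunable("/proc/sys/kernel/sched_boost", "1\n");
	write_tunable("/proc/sys/kernel/sched_upmigrate", "90\n");
	write_tunable("/proc/sys/kernel/sched_downmigrate", "80\n");
	return 0;
}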
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 10a1d7dc9313..4a816bab38a2 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -138,6 +138,8 @@ static const struct bin_table bin_kern_table[] = {
{ CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
{ CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
{ CTL_INT, KERN_PANIC_ON_WARN, "panic_on_warn" },
+ { CTL_INT, KERN_BOOT_REASON, "boot_reason" },
+ { CTL_INT, KERN_COLD_BOOT, "cold_boot" },
{}
};
@@ -523,6 +525,7 @@ static const struct bin_table bin_net_ipv6_conf_var_table[] = {
{ CTL_INT, NET_IPV6_PROXY_NDP, "proxy_ndp" },
{ CTL_INT, NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
{ CTL_INT, NET_IPV6_ACCEPT_RA_FROM_LOCAL, "accept_ra_from_local" },
+ { CTL_INT, NET_IPV6_ACCEPT_RA_PREFIX_ROUTE, "accept_ra_prefix_route" },
{}
};
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 49eca0beed32..5819ca07a22b 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -12,3 +12,5 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o
obj-$(CONFIG_TIMER_STATS) += timer_stats.o
obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
obj-$(CONFIG_TEST_UDELAY) += test_udelay.o
+
+ccflags-y += -Idrivers/cpuidle
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 7fbba635a549..0cdc34ebd8d1 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -26,6 +26,11 @@
#include <linux/workqueue.h>
#include <linux/freezer.h>
+#ifdef CONFIG_MSM_PM
+#include "lpm-levels.h"
+#endif
+#include <linux/workqueue.h>
+
/**
* struct alarm_base - Alarm timer bases
* @lock: Lock for syncrhonized access to the base
@@ -46,14 +51,130 @@ static ktime_t freezer_delta;
static DEFINE_SPINLOCK(freezer_delta_lock);
static struct wakeup_source *ws;
+static struct delayed_work work;
+static struct workqueue_struct *power_off_alarm_workqueue;
#ifdef CONFIG_RTC_CLASS
/* rtc timer and device for setting alarm wakeups at suspend */
static struct rtc_timer rtctimer;
static struct rtc_device *rtcdev;
static DEFINE_SPINLOCK(rtcdev_lock);
+static struct mutex power_on_alarm_lock;
+static struct alarm init_alarm;
/**
+ * power_on_alarm_init - Init power on alarm value
+ *
+ * Read the RTC alarm value after the device boots up and add this alarm
+ * to the alarm queue.
+ */
+void power_on_alarm_init(void)
+{
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rt;
+ unsigned long alarm_time;
+ struct rtc_device *rtc;
+ ktime_t alarm_ktime;
+
+ rtc = alarmtimer_get_rtcdev();
+
+ if (!rtc)
+ return;
+
+ rtc_read_alarm(rtc, &rtc_alarm);
+ rt = rtc_alarm.time;
+
+ rtc_tm_to_time(&rt, &alarm_time);
+
+ if (alarm_time) {
+ alarm_ktime = ktime_set(alarm_time, 0);
+ alarm_init(&init_alarm, ALARM_POWEROFF_REALTIME, NULL);
+ alarm_start(&init_alarm, alarm_ktime);
+ }
+}
+
+/**
+ * set_power_on_alarm - set power on alarm value into rtc register
+ *
+ * Get the soonest power-off alarm timer and write its value into the RTC
+ * alarm register.
+ */
+void set_power_on_alarm(void)
+{
+ int rc;
+ struct timespec wall_time, alarm_ts;
+ long alarm_secs = 0l;
+ long rtc_secs, alarm_time, alarm_delta;
+ struct rtc_time rtc_time;
+ struct rtc_wkalrm alarm;
+ struct rtc_device *rtc;
+ struct timerqueue_node *next;
+ unsigned long flags;
+ struct alarm_base *base = &alarm_bases[ALARM_POWEROFF_REALTIME];
+
+ rc = mutex_lock_interruptible(&power_on_alarm_lock);
+ if (rc != 0)
+ return;
+
+ spin_lock_irqsave(&base->lock, flags);
+ next = timerqueue_getnext(&base->timerqueue);
+ spin_unlock_irqrestore(&base->lock, flags);
+
+ if (next) {
+ alarm_ts = ktime_to_timespec(next->expires);
+ alarm_secs = alarm_ts.tv_sec;
+ }
+
+ if (!alarm_secs)
+ goto disable_alarm;
+
+ getnstimeofday(&wall_time);
+
+ /*
+	 * alarm_secs has to be greater than wall_time + 1 to make sure
+	 * that the alarm time is always later than the wall time.
+ */
+ if (alarm_secs <= wall_time.tv_sec + 1)
+ goto disable_alarm;
+
+ rtc = alarmtimer_get_rtcdev();
+ if (!rtc)
+ goto exit;
+
+ rtc_read_time(rtc, &rtc_time);
+ rtc_tm_to_time(&rtc_time, &rtc_secs);
+ alarm_delta = wall_time.tv_sec - rtc_secs;
+ alarm_time = alarm_secs - alarm_delta;
+
+ rtc_time_to_tm(alarm_time, &alarm.time);
+ alarm.enabled = 1;
+ rc = rtc_set_alarm(rtcdev, &alarm);
+ if (rc)
+ goto disable_alarm;
+
+ mutex_unlock(&power_on_alarm_lock);
+ return;
+
+disable_alarm:
+ rtc_alarm_irq_enable(rtcdev, 0);
+exit:
+ mutex_unlock(&power_on_alarm_lock);
+}
+
+static void alarmtimer_triggered_func(void *p)
+{
+ struct rtc_device *rtc = rtcdev;
+
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ __pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
+}
+
+static struct rtc_task alarmtimer_rtc_task = {
+ .func = alarmtimer_triggered_func
+};
+/**
* alarmtimer_get_rtcdev - Return selected rtcdevice
*
* This function returns the rtc device to use for wakealarms.
@@ -63,7 +184,7 @@ static DEFINE_SPINLOCK(rtcdev_lock);
struct rtc_device *alarmtimer_get_rtcdev(void)
{
unsigned long flags;
- struct rtc_device *ret;
+ struct rtc_device *ret = NULL;
spin_lock_irqsave(&rtcdev_lock, flags);
ret = rtcdev;
@@ -77,33 +198,48 @@ static int alarmtimer_rtc_add_device(struct device *dev,
struct class_interface *class_intf)
{
unsigned long flags;
+ int err = 0;
struct rtc_device *rtc = to_rtc_device(dev);
-
if (rtcdev)
return -EBUSY;
-
if (!rtc->ops->set_alarm)
return -1;
- if (!device_may_wakeup(rtc->dev.parent))
- return -1;
spin_lock_irqsave(&rtcdev_lock, flags);
if (!rtcdev) {
+ err = rtc_irq_register(rtc, &alarmtimer_rtc_task);
+ if (err)
+ goto rtc_irq_reg_err;
rtcdev = rtc;
/* hold a reference so it doesn't go away */
get_device(dev);
}
+
+rtc_irq_reg_err:
spin_unlock_irqrestore(&rtcdev_lock, flags);
- return 0;
+ return err;
+
+}
+
+static void alarmtimer_rtc_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (rtcdev && dev == &rtcdev->dev) {
+ rtc_irq_unregister(rtcdev, &alarmtimer_rtc_task);
+ rtcdev = NULL;
+ }
}
static inline void alarmtimer_rtc_timer_init(void)
{
+ mutex_init(&power_on_alarm_lock);
+
rtc_timer_init(&rtctimer, NULL, NULL);
}
static struct class_interface alarmtimer_rtc_interface = {
.add_dev = &alarmtimer_rtc_add_device,
+ .remove_dev = &alarmtimer_rtc_remove_device,
};
static int alarmtimer_rtc_interface_setup(void)
@@ -124,8 +260,14 @@ struct rtc_device *alarmtimer_get_rtcdev(void)
static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
static inline void alarmtimer_rtc_interface_remove(void) { }
static inline void alarmtimer_rtc_timer_init(void) { }
+void set_power_on_alarm(void) { }
#endif
+static void alarm_work_func(struct work_struct *unused)
+{
+ set_power_on_alarm();
+}
+
/**
* alarmtimer_enqueue - Adds an alarm timer to an alarm_base timerqueue
* @base: pointer to the base where the timer is being run
@@ -195,6 +337,10 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
}
spin_unlock_irqrestore(&base->lock, flags);
+ /* set next power off alarm */
+ if (alarm->type == ALARM_POWEROFF_REALTIME)
+ queue_delayed_work(power_off_alarm_workqueue, &work, 0);
+
return ret;
}
@@ -217,6 +363,70 @@ EXPORT_SYMBOL_GPL(alarm_expires_remaining);
* set an rtc timer to fire that far into the future, which
* will wake us from suspend.
*/
+#if defined(CONFIG_RTC_DRV_QPNP) && defined(CONFIG_MSM_PM)
+static int alarmtimer_suspend(struct device *dev)
+{
+ struct rtc_time tm;
+ ktime_t min, now;
+ unsigned long flags;
+ struct rtc_device *rtc;
+ int i;
+ int ret = 0;
+
+ spin_lock_irqsave(&freezer_delta_lock, flags);
+ min = freezer_delta;
+ freezer_delta = ktime_set(0, 0);
+ spin_unlock_irqrestore(&freezer_delta_lock, flags);
+
+ rtc = alarmtimer_get_rtcdev();
+ /* If we have no rtcdev, just return */
+ if (!rtc)
+ return 0;
+
+	/* Find the soonest timer to expire */
+ for (i = 0; i < ALARM_NUMTYPE; i++) {
+ struct alarm_base *base = &alarm_bases[i];
+ struct timerqueue_node *next;
+ ktime_t delta;
+
+ spin_lock_irqsave(&base->lock, flags);
+ next = timerqueue_getnext(&base->timerqueue);
+ spin_unlock_irqrestore(&base->lock, flags);
+ if (!next)
+ continue;
+ delta = ktime_sub(next->expires, base->gettime());
+ if (!min.tv64 || (delta.tv64 < min.tv64))
+ min = delta;
+ }
+ if (min.tv64 == 0)
+ return 0;
+
+ if (ktime_to_ns(min) < 2 * NSEC_PER_SEC) {
+ __pm_wakeup_event(ws, 2 * MSEC_PER_SEC);
+ return -EBUSY;
+ }
+
+ /* Setup a timer to fire that far in the future */
+ rtc_timer_cancel(rtc, &rtctimer);
+ rtc_read_time(rtc, &tm);
+ now = rtc_tm_to_ktime(tm);
+ now = ktime_add(now, min);
+ if (poweron_alarm) {
+ struct rtc_time tm_val;
+ unsigned long secs;
+
+ tm_val = rtc_ktime_to_tm(min);
+ rtc_tm_to_time(&tm_val, &secs);
+ lpm_suspend_wake_time(secs);
+ } else {
+ /* Set alarm, if in the past reject suspend briefly to handle */
+ ret = rtc_timer_start(rtc, &rtctimer, now, ktime_set(0, 0));
+ if (ret < 0)
+ __pm_wakeup_event(ws, MSEC_PER_SEC);
+ }
+ return ret;
+}
+#else
static int alarmtimer_suspend(struct device *dev)
{
struct rtc_time tm;
@@ -226,6 +436,8 @@ static int alarmtimer_suspend(struct device *dev)
int i;
int ret;
+ cancel_delayed_work_sync(&work);
+
spin_lock_irqsave(&freezer_delta_lock, flags);
min = freezer_delta;
freezer_delta = ktime_set(0, 0);
@@ -271,11 +483,31 @@ static int alarmtimer_suspend(struct device *dev)
__pm_wakeup_event(ws, MSEC_PER_SEC);
return ret;
}
+#endif
+static int alarmtimer_resume(struct device *dev)
+{
+ struct rtc_device *rtc;
+
+ rtc = alarmtimer_get_rtcdev();
+ /* If we have no rtcdev, just return */
+ if (!rtc)
+ return 0;
+ rtc_timer_cancel(rtc, &rtctimer);
+
+ queue_delayed_work(power_off_alarm_workqueue, &work, 0);
+ return 0;
+}
+
#else
static int alarmtimer_suspend(struct device *dev)
{
return 0;
}
+
+static int alarmtimer_resume(struct device *dev)
+{
+ return 0;
+}
#endif
static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
@@ -443,12 +675,14 @@ EXPORT_SYMBOL_GPL(alarm_forward_now);
* clock2alarm - helper that converts from clockid to alarmtypes
* @clockid: clockid.
*/
-static enum alarmtimer_type clock2alarm(clockid_t clockid)
+enum alarmtimer_type clock2alarm(clockid_t clockid)
{
if (clockid == CLOCK_REALTIME_ALARM)
return ALARM_REALTIME;
if (clockid == CLOCK_BOOTTIME_ALARM)
return ALARM_BOOTTIME;
+ if (clockid == CLOCK_POWEROFF_ALARM)
+ return ALARM_POWEROFF_REALTIME;
return -1;
}
@@ -800,6 +1034,7 @@ out:
/* Suspend hook structures */
static const struct dev_pm_ops alarmtimer_pm_ops = {
.suspend = alarmtimer_suspend,
+ .resume = alarmtimer_resume,
};
static struct platform_driver alarmtimer_driver = {
@@ -834,10 +1069,13 @@ static int __init alarmtimer_init(void)
posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
+ posix_timers_register_clock(CLOCK_POWEROFF_ALARM, &alarm_clock);
/* Initialize alarm bases */
alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
alarm_bases[ALARM_REALTIME].gettime = &ktime_get_real;
+ alarm_bases[ALARM_POWEROFF_REALTIME].base_clockid = CLOCK_REALTIME;
+ alarm_bases[ALARM_POWEROFF_REALTIME].gettime = &ktime_get_real;
alarm_bases[ALARM_BOOTTIME].base_clockid = CLOCK_BOOTTIME;
alarm_bases[ALARM_BOOTTIME].gettime = &ktime_get_boottime;
for (i = 0; i < ALARM_NUMTYPE; i++) {
@@ -859,8 +1097,24 @@ static int __init alarmtimer_init(void)
goto out_drv;
}
ws = wakeup_source_register("alarmtimer");
- return 0;
+ if (!ws) {
+ error = -ENOMEM;
+ goto out_ws;
+ }
+
+ INIT_DELAYED_WORK(&work, alarm_work_func);
+ power_off_alarm_workqueue =
+ create_singlethread_workqueue("power_off_alarm");
+ if (!power_off_alarm_workqueue) {
+ error = -ENOMEM;
+ goto out_wq;
+ }
+ return 0;
+out_wq:
+ wakeup_source_unregister(ws);
+out_ws:
+ platform_device_unregister(pdev);
out_drv:
platform_driver_unregister(&alarmtimer_driver);
out_if:
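To illustrate the RTC conversion in set_power_on_alarm() with made-up numbers: if wall-clock time is 1,000,000 s, the RTC reads 999,400 s (so alarm_delta = 600 s) and the soonest power-off alarm expires at wall-clock time 1,000,600 s, then the value programmed into the RTC alarm register is 1,000,600 - 600 = 1,000,000 s in RTC time, i.e. the wall-clock expiry translated into the RTC's own epoch.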
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a26036d37a38..0637823aa5a6 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -70,6 +70,7 @@ struct clock_data {
static struct hrtimer sched_clock_timer;
static int irqtime = -1;
+static int initialized;
core_param(irqtime, irqtime, int, 0400);
@@ -231,6 +232,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
+int sched_clock_initialized(void)
+{
+ return initialized;
+}
+
void __init sched_clock_postinit(void)
{
/*
@@ -249,6 +255,8 @@ void __init sched_clock_postinit(void)
hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sched_clock_timer.function = sched_clock_poll;
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+
+ initialized = 1;
}
/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 22c57e191a23..651ff1a3a306 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -24,6 +24,7 @@
#include <linux/posix-timers.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>
+#include <linux/rq_stats.h>
#include <asm/irq_regs.h>
@@ -31,6 +32,10 @@
#include <trace/events/timer.h>
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
/*
* Per cpu nohz control structure
*/
@@ -41,6 +46,21 @@ static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
*/
static ktime_t last_jiffies_update;
+u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns)
+{
+ u64 cur_jiffies;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&jiffies_lock);
+ *now = ktime_get_ns();
+ *jiffy_ktime_ns = ktime_to_ns(last_jiffies_update);
+ cur_jiffies = get_jiffies_64();
+ } while (read_seqretry(&jiffies_lock, seq));
+
+ return cur_jiffies;
+}
+
struct tick_sched *tick_get_tick_sched(int cpu)
{
return &per_cpu(tick_cpu_sched, cpu);
@@ -143,7 +163,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
* when we go busy again does not account too much ticks.
*/
if (ts->tick_stopped) {
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sched();
if (is_idle_task(current))
ts->idle_jiffies++;
}
@@ -430,7 +450,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
tick_do_update_jiffies64(now);
local_irq_restore(flags);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sched();
}
/*
@@ -701,7 +721,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
update_cpu_load_nohz();
calc_load_exit_idle();
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sched();
/*
* Cancel the scheduled timer and restore the tick
*/
@@ -1049,6 +1069,51 @@ void tick_irq_enter(void)
* High resolution timer specific code
*/
#ifdef CONFIG_HIGH_RES_TIMERS
+static void update_rq_stats(void)
+{
+ unsigned long jiffy_gap = 0;
+ unsigned int rq_avg = 0;
+ unsigned long flags = 0;
+
+ jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+
+ if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+
+ spin_lock_irqsave(&rq_lock, flags);
+
+ if (!rq_info.rq_avg)
+ rq_info.rq_poll_total_jiffies = 0;
+
+ rq_avg = nr_running() * 10;
+
+ if (rq_info.rq_poll_total_jiffies) {
+ rq_avg = (rq_avg * jiffy_gap) +
+ (rq_info.rq_avg *
+ rq_info.rq_poll_total_jiffies);
+ do_div(rq_avg,
+ rq_info.rq_poll_total_jiffies + jiffy_gap);
+ }
+
+ rq_info.rq_avg = rq_avg;
+ rq_info.rq_poll_total_jiffies += jiffy_gap;
+ rq_info.rq_poll_last_jiffy = jiffies;
+
+ spin_unlock_irqrestore(&rq_lock, flags);
+ }
+}
+
+static void wakeup_user(void)
+{
+ unsigned long jiffy_gap;
+
+ jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+
+ if (jiffy_gap >= rq_info.def_timer_jiffies) {
+ rq_info.def_timer_last_jiffy = jiffies;
+ queue_work(rq_wq, &rq_info.def_timer_work);
+ }
+}
+
/*
* We rearm the timer until we get disabled by the idle code.
* Called with interrupts disabled.
@@ -1066,9 +1131,23 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
* Do not call, when we are not in irq context and have
* no valid regs pointer
*/
- if (regs)
+ if (regs) {
tick_sched_handle(ts, regs);
+ if (rq_info.init == 1 &&
+ tick_do_timer_cpu == smp_processor_id()) {
+ /*
+ * update run queue statistics
+ */
+ update_rq_stats();
+
+ /*
+ * wakeup user if needed
+ */
+ wakeup_user();
+ }
+ }
+
/* No need to reprogram if we are in idle or full dynticks mode */
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;
@@ -1181,3 +1260,8 @@ int tick_check_oneshot_change(int allow_nohz)
tick_nohz_switch_to_nohz();
return 0;
}
+
+ktime_t * get_next_event_cpu(unsigned int cpu)
+{
+ return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
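update_rq_stats() keeps a jiffy-weighted running average of nr_running(), scaled by 10. With made-up numbers: if the stored rq_avg is 20 (2.0 runnable tasks) accumulated over 4 jiffies and 3 tasks are runnable over the next 2-jiffy polling gap, the new value is (3*10*2 + 20*4) / (4 + 2) = 140 / 6 = 23, i.e. roughly 2.3 runnable tasks on average.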
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bbc5d1114583..51896272fcde 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -94,12 +94,15 @@ struct tvec_base {
struct tvec tv5;
} ____cacheline_aligned;
+static inline void __run_timers(struct tvec_base *base);
static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
unsigned int sysctl_timer_migration = 1;
+struct tvec_base tvec_base_deferrable;
+
void timers_update_migration(bool update_nohz)
{
bool on = sysctl_timer_migration && tick_nohz_active;
@@ -135,18 +138,62 @@ int timer_migration_handler(struct ctl_table *table, int write,
}
static inline struct tvec_base *get_target_base(struct tvec_base *base,
- int pinned)
+ int pinned, u32 timer_flags)
{
+ if (!pinned && !(timer_flags & TIMER_PINNED_ON_CPU) &&
+ (timer_flags & TIMER_DEFERRABLE))
+ return &tvec_base_deferrable;
if (pinned || !base->migration_enabled)
return this_cpu_ptr(&tvec_bases);
return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
}
+
+static inline void __run_deferrable_timers(void)
+{
+ if (smp_processor_id() == tick_do_timer_cpu &&
+ time_after_eq(jiffies, tvec_base_deferrable.timer_jiffies))
+ __run_timers(&tvec_base_deferrable);
+}
+
+static inline void init_timer_deferrable_global(void)
+{
+ tvec_base_deferrable.cpu = nr_cpu_ids;
+ spin_lock_init(&tvec_base_deferrable.lock);
+ tvec_base_deferrable.timer_jiffies = jiffies;
+ tvec_base_deferrable.next_timer = tvec_base_deferrable.timer_jiffies;
+}
+
+static inline struct tvec_base *get_timer_base(u32 timer_flags)
+{
+ if (!(timer_flags & TIMER_PINNED_ON_CPU) &&
+ timer_flags & TIMER_DEFERRABLE)
+ return &tvec_base_deferrable;
+ else
+ return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
+}
#else
static inline struct tvec_base *get_target_base(struct tvec_base *base,
- int pinned)
+ int pinned, u32 timer_flags)
{
return this_cpu_ptr(&tvec_bases);
}
+
+static inline void __run_deferrable_timers(void)
+{
+}
+
+static inline void init_timer_deferrable_global(void)
+{
+ /*
+	 * The CPU-unbound deferrable timer base is only initialized when
+	 * CONFIG_SMP is set; a UP kernel handles these timers with the
+	 * CPU 0 timer base.
+ */
+}
+
+static inline struct tvec_base *get_timer_base(u32 timer_flags)
+{
+ return per_cpu_ptr(&tvec_bases, timer_flags & TIMER_CPUMASK);
+}
#endif
static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -768,7 +815,7 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
struct tvec_base *base;
if (!(tf & TIMER_MIGRATING)) {
- base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+ base = get_timer_base(tf);
spin_lock_irqsave(&base->lock, *flags);
if (timer->flags == tf)
return base;
@@ -797,7 +844,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
debug_activate(timer, expires);
- new_base = get_target_base(base, pinned);
+ new_base = get_target_base(base, pinned, timer->flags);
if (base != new_base) {
/*
@@ -819,6 +866,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
}
}
+ if (pinned == TIMER_PINNED)
+ timer->flags |= TIMER_PINNED_ON_CPU;
+ else
+ timer->flags &= ~TIMER_PINNED_ON_CPU;
timer->expires = expires;
internal_add_timer(base, timer);
@@ -1000,6 +1051,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
(timer->flags & ~TIMER_BASEMASK) | cpu);
}
+ timer->flags |= TIMER_PINNED_ON_CPU;
debug_activate(timer, timer->expires);
internal_add_timer(base, timer);
spin_unlock_irqrestore(&base->lock, flags);
@@ -1433,6 +1485,8 @@ static void run_timer_softirq(struct softirq_action *h)
{
struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+ __run_deferrable_timers();
+
if (time_after_eq(jiffies, base->timer_jiffies))
__run_timers(base);
}
@@ -1656,6 +1710,8 @@ static void __init init_timer_cpus(void)
for_each_possible_cpu(cpu)
init_timer_cpu(cpu);
+
+ init_timer_deferrable_global();
}
void __init init_timers(void)
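A minimal sketch of a timer that would now be routed to the global deferrable base rather than a per-CPU base, assuming the usual deferrable-timer setup helpers from this kernel version; the callback and one-second period are illustrative:

static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	/* Housekeeping that can tolerate being deferred across idle. */
	mod_timer(&example_timer, jiffies + HZ);	/* re-armed, still unpinned */
}

static void example_timer_setup(void)
{
	init_timer_deferrable(&example_timer);
	example_timer.function = example_timer_fn;
	example_timer.data = 0;
	/* Deferrable and not pinned, so get_target_base() picks tvec_base_deferrable. */
	mod_timer(&example_timer, jiffies + HZ);
}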
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 5f5b66a2f156..048bf074bef9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -89,6 +89,31 @@ config RING_BUFFER_ALLOW_SWAP
Allow the use of ring_buffer_swap_cpu.
Adds a very slight overhead to tracing when enabled.
+config IPC_LOGGING
+ bool "Debug Logging for IPC Drivers"
+ select GENERIC_TRACER
+ help
+	  This option enables debug logging for IPC drivers.
+
+ If in doubt, say no.
+
+config QCOM_RTB
+ bool "Register tracing"
+ help
+ Add support for logging different events to a small uncached
+ region. This is designed to aid in debugging reset cases where the
+ caches may not be flushed before the target resets.
+
+config QCOM_RTB_SEPARATE_CPUS
+ bool "Separate entries for each cpu"
+ depends on QCOM_RTB
+ depends on SMP
+ help
+ Under some circumstances, it may be beneficial to give dedicated space
+ for each cpu to log accesses. Selecting this option will log each cpu
+	  separately. This guarantees that the last accesses for each cpu
+	  will be logged, but there will be fewer entries per cpu.
+
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
@@ -488,6 +513,19 @@ config FUNCTION_PROFILER
If in doubt, say N.
+config CPU_FREQ_SWITCH_PROFILER
+ bool "CPU frequency switch time profiler"
+ select GENERIC_TRACER
+ help
+ This option enables the CPU frequency switch profiler. A file is
+ created in debugfs called "cpu_freq_switch_profile_enabled", which
+ defaults to zero. When a 1 is echoed into this file, profiling begins.
+ When a zero is echoed, profiling stops. A "cpu_freq_switch" file is
+ also created in the trace_stats directory; this file shows the
+ switches that have occurred and duration statistics.
+
+ If in doubt, say N.
+
config FTRACE_MCOUNT_RECORD
def_bool y
depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index ba04bf0c2653..2acad4b6a92a 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_CPU_FREQ_SWITCH_PROFILER) += trace_cpu_freq_switch.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
@@ -65,7 +66,12 @@ endif
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
+obj-$(CONFIG_QCOM_RTB) += msm_rtb.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging_debug.o
+endif
libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index a990824c8604..7b6127653a37 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -199,9 +199,9 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
* blk_io_trace structure and places it in a per-cpu subbuffer.
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
- int rw, u32 what, int error, int pdu_len, void *pdu_data)
+ int rw, u32 what, int error, int pdu_len,
+ void *pdu_data, struct task_struct *tsk)
{
- struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
struct ring_buffer *buffer = NULL;
struct blk_io_trace *t;
@@ -708,18 +708,33 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
unsigned int nr_bytes, u32 what)
{
struct blk_trace *bt = q->blk_trace;
+ struct task_struct *tsk = current;
if (likely(!bt))
return;
+ /*
+ * Use the bio context for all events except ISSUE and
+ * COMPLETE events.
+ *
+	 * Not all the pages in the bio are necessarily dirtied by the same
+	 * task, but most likely they are, since the sectors accessed on the
+	 * device must be adjacent.
+ */
+ if (!((what == BLK_TA_ISSUE) || (what == BLK_TA_COMPLETE)) &&
+ bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
- what, rq->errors, rq->cmd_len, rq->cmd);
+ what, rq->errors, rq->cmd_len, rq->cmd, tsk);
} else {
what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
- rq->cmd_flags, what, rq->errors, 0, NULL);
+ rq->cmd_flags, what, rq->errors, 0, NULL, tsk);
}
}
@@ -771,12 +786,22 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what, int error)
{
struct blk_trace *bt = q->blk_trace;
+ struct task_struct *tsk = current;
if (likely(!bt))
return;
+ /*
+	 * Not all the pages in the bio are necessarily dirtied by the same
+	 * task, but most likely they are, since the sectors accessed on the
+	 * device must be adjacent.
+ */
+ if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio->bi_rw, what, error, 0, NULL);
+ bio->bi_rw, what, error, 0, NULL, tsk);
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -824,7 +849,8 @@ static void blk_add_trace_getrq(void *ignore,
struct blk_trace *bt = q->blk_trace;
if (bt)
- __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
+ __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0,
+ NULL, current);
}
}
@@ -840,7 +866,7 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bt)
__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
- 0, 0, NULL);
+ 0, 0, NULL, current);
}
}
@@ -849,7 +875,8 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
struct blk_trace *bt = q->blk_trace;
if (bt)
- __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
+ __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL,
+ current);
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
@@ -866,7 +893,8 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
else
what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+ __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu,
+ current);
}
}
@@ -875,13 +903,19 @@ static void blk_add_trace_split(void *ignore,
unsigned int pdu)
{
struct blk_trace *bt = q->blk_trace;
+ struct task_struct *tsk = current;
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
+ if (bio_has_data(bio) && bio->bi_io_vec &&
+ bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
- bio->bi_error, sizeof(rpdu), &rpdu);
+ bio->bi_error, sizeof(rpdu), &rpdu, tsk);
}
}
@@ -904,6 +938,7 @@ static void blk_add_trace_bio_remap(void *ignore,
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;
+ struct task_struct *tsk = current;
if (likely(!bt))
return;
@@ -912,9 +947,14 @@ static void blk_add_trace_bio_remap(void *ignore,
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
r.sector_from = cpu_to_be64(from);
+ if (bio_has_data(bio) && bio->bi_io_vec &&
+ bio->bi_io_vec->bv_page &&
+ bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
- sizeof(r), &r);
+ sizeof(r), &r, tsk);
}
/**
@@ -937,6 +977,7 @@ static void blk_add_trace_rq_remap(void *ignore,
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;
+ struct task_struct *tsk = current;
if (likely(!bt))
return;
@@ -945,9 +986,14 @@ static void blk_add_trace_rq_remap(void *ignore,
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
r.sector_from = cpu_to_be64(from);
+ if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
- sizeof(r), &r);
+ sizeof(r), &r, tsk);
}
/**
@@ -966,16 +1012,22 @@ void blk_add_driver_data(struct request_queue *q,
void *data, size_t len)
{
struct blk_trace *bt = q->blk_trace;
+ struct task_struct *tsk = current;
if (likely(!bt))
return;
+ if (bio_has_data(rq->bio) && rq->bio->bi_io_vec &&
+ rq->bio->bi_io_vec->bv_page &&
+ rq->bio->bi_io_vec->bv_page->tsk_dirty)
+ tsk = rq->bio->bi_io_vec->bv_page->tsk_dirty;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
- BLK_TA_DRV_DATA, rq->errors, len, data);
+ BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
else
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
- BLK_TA_DRV_DATA, rq->errors, len, data);
+ BLK_TA_DRV_DATA, rq->errors, len, data, tsk);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
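The repeated bv_page->tsk_dirty checks above could be read as one helper; a hedged sketch (the tsk_dirty field on struct page is assumed to be populated elsewhere in this patch series when the page is dirtied):

static struct task_struct *blk_trace_bio_task(struct bio *bio)
{
	/* Attribute the event to the task that dirtied the first page, if known. */
	if (bio_has_data(bio) && bio->bi_io_vec && bio->bi_io_vec->bv_page &&
	    bio->bi_io_vec->bv_page->tsk_dirty)
		return bio->bi_io_vec->bv_page->tsk_dirty;

	return current;
}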
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
new file mode 100644
index 000000000000..2c3e0998d400
--- /dev/null
+++ b/kernel/trace/ipc_logging.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+#define LOG_PAGE_DATA_SIZE sizeof(((struct ipc_log_page *)0)->data)
+#define LOG_PAGE_FLAG (1 << 31)
+
+static LIST_HEAD(ipc_log_context_list);
+static DEFINE_RWLOCK(context_list_lock_lha1);
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+ int type);
+
+static struct ipc_log_page *get_first_page(struct ipc_log_context *ilctxt)
+{
+ struct ipc_log_page_header *p_pghdr;
+ struct ipc_log_page *pg = NULL;
+
+ if (!ilctxt)
+ return NULL;
+ p_pghdr = list_first_entry(&ilctxt->page_list,
+ struct ipc_log_page_header, list);
+ pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+ return pg;
+}
+
+/**
+ * is_nd_read_empty - Returns true if no data is available to read in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if the context is empty; 0 if not empty; < 0 on failure
+ *
+ * This is for the debugfs read pointer which allows for a non-destructive read.
+ * There may still be data in the log, but it may have already been read.
+ */
+static int is_nd_read_empty(struct ipc_log_context *ilctxt)
+{
+ if (!ilctxt)
+ return -EINVAL;
+
+ return ((ilctxt->nd_read_page == ilctxt->write_page) &&
+ (ilctxt->nd_read_page->hdr.nd_read_offset ==
+ ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_read_empty - Returns true if no data is available in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if the context is empty; 0 if not empty; < 0 on failure
+ *
+ * This is for the actual log contents. If it is empty, then there
+ * is no data at all in the log.
+ */
+static int is_read_empty(struct ipc_log_context *ilctxt)
+{
+ if (!ilctxt)
+ return -EINVAL;
+
+ return ((ilctxt->read_page == ilctxt->write_page) &&
+ (ilctxt->read_page->hdr.read_offset ==
+ ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_nd_read_equal_read - Return true if the non-destructive read is equal to
+ * the destructive read
+ *
+ * @ilctxt: logging context
+ * @returns: true if nd read is equal to read; false otherwise
+ */
+static bool is_nd_read_equal_read(struct ipc_log_context *ilctxt)
+{
+ uint16_t read_offset;
+ uint16_t nd_read_offset;
+
+ if (ilctxt->nd_read_page == ilctxt->read_page) {
+ read_offset = ilctxt->read_page->hdr.read_offset;
+ nd_read_offset = ilctxt->nd_read_page->hdr.nd_read_offset;
+
+ if (read_offset == nd_read_offset)
+ return true;
+ }
+
+ return false;
+}
+
+
+static struct ipc_log_page *get_next_page(struct ipc_log_context *ilctxt,
+ struct ipc_log_page *cur_pg)
+{
+ struct ipc_log_page_header *p_pghdr;
+ struct ipc_log_page *pg = NULL;
+
+ if (!ilctxt || !cur_pg)
+ return NULL;
+
+ if (ilctxt->last_page == cur_pg)
+ return ilctxt->first_page;
+
+ p_pghdr = list_first_entry(&cur_pg->hdr.list,
+ struct ipc_log_page_header, list);
+ pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+
+ return pg;
+}
+
+/**
+ * ipc_log_read - do non-destructive read of the log
+ *
+ * @ilctxt: Logging context
+ * @data: Data pointer to receive the data
+ * @data_size: Number of bytes to read (must be <= bytes available in log)
+ *
+ * This read updates a runtime read pointer but does not affect the actual
+ * contents of the log, which allows the logs to be read continuously while
+ * debugging; if the system crashes, the full logs can still be extracted.
+ */
+static void ipc_log_read(struct ipc_log_context *ilctxt,
+ void *data, int data_size)
+{
+ int bytes_to_read;
+
+ bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->nd_read_page->hdr.nd_read_offset,
+ data_size);
+
+ memcpy(data, (ilctxt->nd_read_page->data +
+ ilctxt->nd_read_page->hdr.nd_read_offset), bytes_to_read);
+
+ if (bytes_to_read != data_size) {
+ /* not enough space, wrap read to next page */
+ ilctxt->nd_read_page->hdr.nd_read_offset = 0;
+ ilctxt->nd_read_page = get_next_page(ilctxt,
+ ilctxt->nd_read_page);
+ BUG_ON(ilctxt->nd_read_page == NULL);
+
+ memcpy((data + bytes_to_read),
+ (ilctxt->nd_read_page->data +
+ ilctxt->nd_read_page->hdr.nd_read_offset),
+ (data_size - bytes_to_read));
+ bytes_to_read = (data_size - bytes_to_read);
+ }
+ ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * ipc_log_drop - do destructive read of the log
+ *
+ * @ilctxt: Logging context
+ * @data: Data pointer to receive the data (or NULL)
+ * @data_size: Number of bytes to read (must be <= bytes available in log)
+ */
+static void ipc_log_drop(struct ipc_log_context *ilctxt, void *data,
+ int data_size)
+{
+ int bytes_to_read;
+ bool push_nd_read;
+
+ bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->read_page->hdr.read_offset,
+ data_size);
+ if (data)
+ memcpy(data, (ilctxt->read_page->data +
+ ilctxt->read_page->hdr.read_offset), bytes_to_read);
+
+ if (bytes_to_read != data_size) {
+ /* not enough space, wrap read to next page */
+ push_nd_read = is_nd_read_equal_read(ilctxt);
+
+ ilctxt->read_page->hdr.read_offset = 0;
+ if (push_nd_read) {
+ ilctxt->read_page->hdr.nd_read_offset = 0;
+ ilctxt->read_page = get_next_page(ilctxt,
+ ilctxt->read_page);
+ BUG_ON(ilctxt->read_page == NULL);
+ ilctxt->nd_read_page = ilctxt->read_page;
+ } else {
+ ilctxt->read_page = get_next_page(ilctxt,
+ ilctxt->read_page);
+ BUG_ON(ilctxt->read_page == NULL);
+ }
+
+ if (data)
+ memcpy((data + bytes_to_read),
+ (ilctxt->read_page->data +
+ ilctxt->read_page->hdr.read_offset),
+ (data_size - bytes_to_read));
+
+ bytes_to_read = (data_size - bytes_to_read);
+ }
+
+ /* update non-destructive read pointer if necessary */
+ push_nd_read = is_nd_read_equal_read(ilctxt);
+ ilctxt->read_page->hdr.read_offset += bytes_to_read;
+ ilctxt->write_avail += data_size;
+
+ if (push_nd_read)
+ ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * msg_read - Reads a message.
+ *
+ * If a message is read successfully, then the message context
+ * will be set to:
+ * .hdr message header .size and .type values
+ * .offset beginning of message data
+ *
+ * @ilctxt Logging context
+ * @ectxt Message context
+ *
+ * @returns 0 - no message available; >0 message size; <0 error
+ */
+static int msg_read(struct ipc_log_context *ilctxt,
+ struct encode_context *ectxt)
+{
+ struct tsv_header hdr;
+
+ if (!ectxt)
+ return -EINVAL;
+
+ if (is_nd_read_empty(ilctxt))
+ return 0;
+
+ ipc_log_read(ilctxt, &hdr, sizeof(hdr));
+ ectxt->hdr.type = hdr.type;
+ ectxt->hdr.size = hdr.size;
+ ectxt->offset = sizeof(hdr);
+ ipc_log_read(ilctxt, (ectxt->buff + ectxt->offset),
+ (int)hdr.size);
+
+ return sizeof(hdr) + (int)hdr.size;
+}
+
+/**
+ * msg_drop - Drops a message.
+ *
+ * @ilctxt Logging context
+ */
+static void msg_drop(struct ipc_log_context *ilctxt)
+{
+ struct tsv_header hdr;
+
+ if (!is_read_empty(ilctxt)) {
+ ipc_log_drop(ilctxt, &hdr, sizeof(hdr));
+ ipc_log_drop(ilctxt, NULL, (int)hdr.size);
+ }
+}
+
+/*
+ * Commits messages to the FIFO. If the FIFO is full, then enough
+ * messages are dropped to create space for the new message.
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt)
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ int bytes_to_write;
+ unsigned long flags;
+
+ if (!ilctxt || !ectxt) {
+ pr_err("%s: Invalid ipc_log or encode context\n", __func__);
+ return;
+ }
+
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ while (ilctxt->write_avail <= ectxt->offset)
+ msg_drop(ilctxt);
+
+ bytes_to_write = MIN(LOG_PAGE_DATA_SIZE
+ - ilctxt->write_page->hdr.write_offset,
+ ectxt->offset);
+ memcpy((ilctxt->write_page->data +
+ ilctxt->write_page->hdr.write_offset),
+ ectxt->buff, bytes_to_write);
+
+ if (bytes_to_write != ectxt->offset) {
+ uint64_t t_now = sched_clock();
+
+ ilctxt->write_page->hdr.write_offset += bytes_to_write;
+ ilctxt->write_page->hdr.end_time = t_now;
+
+ ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
+ BUG_ON(ilctxt->write_page == NULL);
+ ilctxt->write_page->hdr.write_offset = 0;
+ ilctxt->write_page->hdr.start_time = t_now;
+ memcpy((ilctxt->write_page->data +
+ ilctxt->write_page->hdr.write_offset),
+ (ectxt->buff + bytes_to_write),
+ (ectxt->offset - bytes_to_write));
+ bytes_to_write = (ectxt->offset - bytes_to_write);
+ }
+ ilctxt->write_page->hdr.write_offset += bytes_to_write;
+ ilctxt->write_avail -= ectxt->offset;
+ complete(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+}
+EXPORT_SYMBOL(ipc_log_write);
+
+/*
+ * Starts a new message after which you can add serialized data and
+ * then complete the message by calling msg_encode_end().
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return;
+ }
+
+ ectxt->hdr.type = type;
+ ectxt->hdr.size = 0;
+ ectxt->offset = sizeof(ectxt->hdr);
+}
+EXPORT_SYMBOL(msg_encode_start);
+
+/*
+ * Completes the message
+ */
+void msg_encode_end(struct encode_context *ectxt)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return;
+ }
+
+ /* finalize data size */
+ ectxt->hdr.size = ectxt->offset - sizeof(ectxt->hdr);
+ BUG_ON(ectxt->hdr.size > MAX_MSG_SIZE);
+ memcpy(ectxt->buff, &ectxt->hdr, sizeof(ectxt->hdr));
+}
+EXPORT_SYMBOL(msg_encode_end);
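+
+/*
+ * Illustrative sketch (not part of this patch): a client logging a custom
+ * message type would typically drive the encode API as follows.  The type
+ * value MY_TSV_TYPE, the context my_ilctxt and some_value are hypothetical.
+ *
+ *	struct encode_context ectxt;
+ *
+ *	msg_encode_start(&ectxt, MY_TSV_TYPE);
+ *	tsv_timestamp_write(&ectxt);
+ *	tsv_int32_write(&ectxt, some_value);
+ *	msg_encode_end(&ectxt);
+ *	ipc_log_write(my_ilctxt, &ectxt);
+ *
+ * Custom types only show up in the debugfs log once a matching
+ * deserialization function has been registered (see
+ * add_deserialization_func() below).
+ */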
+
+/*
+ * Helper function used to write data to a message context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @data data to write
+ * @size number of bytes of data to write
+ */
+static inline int tsv_write_data(struct encode_context *ectxt,
+ void *data, uint32_t size)
+{
+ if (!ectxt) {
+ pr_err("%s: Invalid encode context\n", __func__);
+ return -EINVAL;
+ }
+ if ((ectxt->offset + size) > MAX_MSG_SIZE) {
+ pr_err("%s: No space to encode further\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy((void *)(ectxt->buff + ectxt->offset), data, size);
+ ectxt->offset += size;
+ return 0;
+}
+
+/*
+ * Helper function that writes a type to the context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @type primitive type
+ * @size size of primitive in bytes
+ */
+static inline int tsv_write_header(struct encode_context *ectxt,
+ uint32_t type, uint32_t size)
+{
+ struct tsv_header hdr;
+
+ hdr.type = (unsigned char)type;
+ hdr.size = (unsigned char)size;
+ return tsv_write_data(ectxt, &hdr, sizeof(hdr));
+}
+
+/*
+ * Writes the current timestamp count.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt)
+{
+ int ret;
+ uint64_t t_now = sched_clock();
+
+ ret = tsv_write_header(ectxt, TSV_TYPE_TIMESTAMP, sizeof(t_now));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_timestamp_write);
+
+/*
+ * Writes the current QTimer timestamp count.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt)
+{
+ int ret;
+ uint64_t t_now = arch_counter_get_cntvct();
+
+ ret = tsv_write_header(ectxt, TSV_TYPE_QTIMER, sizeof(t_now));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_qtimer_write);
+
+/*
+ * Writes a data pointer.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @pointer pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_POINTER, sizeof(pointer));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &pointer, sizeof(pointer));
+}
+EXPORT_SYMBOL(tsv_pointer_write);
+
+/*
+ * Writes a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @n integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_INT32, sizeof(n));
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, &n, sizeof(n));
+}
+EXPORT_SYMBOL(tsv_int32_write);
+
+/*
+ * Writes a byte array.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @data Beginning address of data
+ * @data_size Size of data to be written
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size)
+{
+ int ret;
+ ret = tsv_write_header(ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+ if (ret)
+ return ret;
+ return tsv_write_data(ectxt, data, data_size);
+}
+EXPORT_SYMBOL(tsv_byte_array_write);
+
+/*
+ * Helper function to log a string
+ *
+ * @ilctxt ipc_log_context created using ipc_log_context_create()
+ * @fmt Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{
+ struct encode_context ectxt;
+ int avail_size, data_size, hdr_size = sizeof(struct tsv_header);
+ va_list arg_list;
+
+ if (!ilctxt)
+ return -EINVAL;
+
+ msg_encode_start(&ectxt, TSV_TYPE_STRING);
+ tsv_timestamp_write(&ectxt);
+ tsv_qtimer_write(&ectxt);
+ avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
+ va_start(arg_list, fmt);
+ data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
+ avail_size, fmt, arg_list);
+ va_end(arg_list);
+ tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+ ectxt.offset += data_size;
+ msg_encode_end(&ectxt);
+ ipc_log_write(ilctxt, &ectxt);
+ return 0;
+}
+EXPORT_SYMBOL(ipc_log_string);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ctxt, char *buff, int size)
+{
+ struct encode_context ectxt;
+ struct decode_context dctxt;
+ void (*deserialize_func)(struct encode_context *ectxt,
+ struct decode_context *dctxt);
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ unsigned long flags;
+
+ if (size < MAX_MSG_DECODED_SIZE)
+ return -EINVAL;
+
+ dctxt.output_format = OUTPUT_DEBUGFS;
+ dctxt.buff = buff;
+ dctxt.size = size;
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
+ !is_nd_read_empty(ilctxt)) {
+ msg_read(ilctxt, &ectxt);
+ deserialize_func = get_deserialization_func(ilctxt,
+ ectxt.hdr.type);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ if (deserialize_func)
+ deserialize_func(&ectxt, &dctxt);
+ else
+ pr_err("%s: unknown message 0x%x\n",
+ __func__, ectxt.hdr.type);
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ }
+ if ((size - dctxt.size) == 0)
+ reinit_completion(&ilctxt->read_avail);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return size - dctxt.size;
+}
+EXPORT_SYMBOL(ipc_log_extract);
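+
+/*
+ * Illustrative sketch (not part of this patch): a kernel-side consumer can
+ * drain the log with a buffer of at least MAX_MSG_DECODED_SIZE bytes;
+ * my_ilctxt is a hypothetical context handle.
+ *
+ *	char buf[MAX_MSG_DECODED_SIZE];
+ *	int n;
+ *
+ *	do {
+ *		n = ipc_log_extract(my_ilctxt, buf, sizeof(buf));
+ *		if (n > 0)
+ *			pr_info("%.*s", n, buf);
+ *	} while (n > 0);
+ */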
+
+/*
+ * Helper function used to read data from a message context.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @data data to read
+ * @size number of bytes of data to read
+ */
+static void tsv_read_data(struct encode_context *ectxt,
+ void *data, uint32_t size)
+{
+ BUG_ON((ectxt->offset + size) > MAX_MSG_SIZE);
+ memcpy(data, (ectxt->buff + ectxt->offset), size);
+ ectxt->offset += size;
+}
+
+/*
+ * Helper function that reads a type from the context and updates the
+ * context pointers.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @hdr type header
+ */
+static void tsv_read_header(struct encode_context *ectxt,
+ struct tsv_header *hdr)
+{
+ BUG_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE);
+ memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
+ ectxt->offset += sizeof(*hdr);
+}
+
+/*
+ * Reads a timestamp.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format (appended to the %6u.%09lu timestamp format)
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ uint64_t val;
+ unsigned long nanosec_rem;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_TIMESTAMP);
+ tsv_read_data(ectxt, &val, sizeof(val));
+ nanosec_rem = do_div(val, 1000000000U);
+ IPC_SPRINTF_DECODE(dctxt, "[%6u.%09lu%s/",
+ (unsigned)val, nanosec_rem, format);
+}
+EXPORT_SYMBOL(tsv_timestamp_read);
+
+/*
+ * Reads a QTimer timestamp.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format (appended to %#18llx timestamp format)
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ uint64_t val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_QTIMER);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ /*
+ * This gives 16 hex digits of output. The # prefix prepends
+ * a 0x, and these characters count as part of the number.
+ */
+ IPC_SPRINTF_DECODE(dctxt, "%#18llx]%s", val, format);
+}
+EXPORT_SYMBOL(tsv_qtimer_read);
+
+/*
+ * Reads a data pointer.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ void *val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_POINTER);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ IPC_SPRINTF_DECODE(dctxt, format, val);
+}
+EXPORT_SYMBOL(tsv_pointer_read);
+
+/*
+ * Reads a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+ int32_t val;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_INT32);
+ tsv_read_data(ectxt, &val, sizeof(val));
+
+ IPC_SPRINTF_DECODE(dctxt, format, val);
+ return val;
+}
+EXPORT_SYMBOL(tsv_int32_read);
+
+/*
+ * Reads a byte array/string.
+ *
+ * @ectxt context initialized by calling msg_read()
+ * @dctxt deserialization context
+ * @format output format
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{
+ struct tsv_header hdr;
+
+ tsv_read_header(ectxt, &hdr);
+ BUG_ON(hdr.type != TSV_TYPE_BYTE_ARRAY);
+ tsv_read_data(ectxt, dctxt->buff, hdr.size);
+ dctxt->buff += hdr.size;
+ dctxt->size -= hdr.size;
+}
+EXPORT_SYMBOL(tsv_byte_array_read);
+
+int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *))
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ struct dfunc_info *df_info;
+ unsigned long flags;
+
+ if (!ilctxt || !dfunc)
+ return -EINVAL;
+
+ df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL);
+ if (!df_info)
+ return -ENOSPC;
+
+ read_lock_irqsave(&context_list_lock_lha1, flags);
+ spin_lock(&ilctxt->context_lock_lhb1);
+ df_info->type = type;
+ df_info->dfunc = dfunc;
+ list_add_tail(&df_info->list, &ilctxt->dfunc_info_list);
+ spin_unlock(&ilctxt->context_lock_lhb1);
+ read_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return 0;
+}
+EXPORT_SYMBOL(add_deserialization_func);
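+
+/*
+ * Illustrative sketch (not part of this patch): a deserialization callback
+ * for the custom type used in the earlier encode example could mirror
+ * dfunc_string() in ipc_logging_debug.c.  MY_TSV_TYPE, my_dfunc and
+ * my_ilctxt are hypothetical names.
+ *
+ *	static void my_dfunc(struct encode_context *ectxt,
+ *			     struct decode_context *dctxt)
+ *	{
+ *		tsv_timestamp_read(ectxt, dctxt, "");
+ *		tsv_int32_read(ectxt, dctxt, " value=%d\n");
+ *	}
+ *
+ *	add_deserialization_func(my_ilctxt, MY_TSV_TYPE, my_dfunc);
+ */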
+
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+ int type)
+{
+ struct dfunc_info *df_info = NULL;
+
+ if (!ilctxt)
+ return NULL;
+
+ list_for_each_entry(df_info, &ilctxt->dfunc_info_list, list) {
+ if (df_info->type == type)
+ return df_info->dfunc;
+ }
+ return NULL;
+}
+
+/**
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @mod_name : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages,
+ const char *mod_name, uint16_t user_version)
+{
+ struct ipc_log_context *ctxt;
+ struct ipc_log_page *pg = NULL;
+ int page_cnt;
+ unsigned long flags;
+
+ ctxt = kzalloc(sizeof(struct ipc_log_context), GFP_KERNEL);
+ if (!ctxt) {
+ pr_err("%s: cannot create ipc_log_context\n", __func__);
+ return 0;
+ }
+
+ init_completion(&ctxt->read_avail);
+ INIT_LIST_HEAD(&ctxt->page_list);
+ INIT_LIST_HEAD(&ctxt->dfunc_info_list);
+ spin_lock_init(&ctxt->context_lock_lhb1);
+ for (page_cnt = 0; page_cnt < max_num_pages; page_cnt++) {
+ pg = kzalloc(sizeof(struct ipc_log_page), GFP_KERNEL);
+ if (!pg) {
+ pr_err("%s: cannot create ipc_log_page\n", __func__);
+ goto release_ipc_log_context;
+ }
+ pg->hdr.log_id = (uint64_t)(uintptr_t)ctxt;
+ pg->hdr.page_num = LOG_PAGE_FLAG | page_cnt;
+ pg->hdr.ctx_offset = (int64_t)((uint64_t)(uintptr_t)ctxt -
+ (uint64_t)(uintptr_t)&pg->hdr);
+
+ /* set magic last to signal that page init is complete */
+ pg->hdr.magic = IPC_LOGGING_MAGIC_NUM;
+ pg->hdr.nmagic = ~(IPC_LOGGING_MAGIC_NUM);
+
+ spin_lock_irqsave(&ctxt->context_lock_lhb1, flags);
+ list_add_tail(&pg->hdr.list, &ctxt->page_list);
+ spin_unlock_irqrestore(&ctxt->context_lock_lhb1, flags);
+ }
+
+ ctxt->log_id = (uint64_t)(uintptr_t)ctxt;
+ ctxt->version = IPC_LOG_VERSION;
+ strlcpy(ctxt->name, mod_name, IPC_LOG_MAX_CONTEXT_NAME_LEN);
+ ctxt->user_version = user_version;
+ ctxt->first_page = get_first_page(ctxt);
+ ctxt->last_page = pg;
+ ctxt->write_page = ctxt->first_page;
+ ctxt->read_page = ctxt->first_page;
+ ctxt->nd_read_page = ctxt->first_page;
+ ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
+ ctxt->header_size = sizeof(struct ipc_log_page_header);
+ create_ctx_debugfs(ctxt, mod_name);
+
+ /* set magic last to signal context init is complete */
+ ctxt->magic = IPC_LOG_CONTEXT_MAGIC_NUM;
+ ctxt->nmagic = ~(IPC_LOG_CONTEXT_MAGIC_NUM);
+
+ write_lock_irqsave(&context_list_lock_lha1, flags);
+ list_add_tail(&ctxt->list, &ipc_log_context_list);
+ write_unlock_irqrestore(&context_list_lock_lha1, flags);
+ return (void *)ctxt;
+
+release_ipc_log_context:
+ while (page_cnt-- > 0) {
+ pg = get_first_page(ctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+ kfree(ctxt);
+ return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_create);
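+
+/*
+ * Illustrative sketch (not part of this patch): the common string-logging
+ * path only needs ipc_log_context_create() and ipc_log_string().
+ * "my_driver", my_log and irq are hypothetical.
+ *
+ *	static void *my_log;
+ *
+ *	my_log = ipc_log_context_create(2, "my_driver", 0);
+ *	if (my_log)
+ *		ipc_log_string(my_log, "probe: irq=%d", irq);
+ */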
+
+/*
+ * Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt)
+{
+ struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+ struct ipc_log_page *pg = NULL;
+ unsigned long flags;
+
+ if (!ilctxt)
+ return 0;
+
+ while (!list_empty(&ilctxt->page_list)) {
+ pg = get_first_page(ctxt);
+ list_del(&pg->hdr.list);
+ kfree(pg);
+ }
+
+ write_lock_irqsave(&context_list_lock_lha1, flags);
+ list_del(&ilctxt->list);
+ write_unlock_irqrestore(&context_list_lock_lha1, flags);
+
+ debugfs_remove_recursive(ilctxt->dent);
+
+ kfree(ilctxt);
+ return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_destroy);
+
+static int __init ipc_logging_init(void)
+{
+ check_and_create_debugfs();
+ return 0;
+}
+
+module_init(ipc_logging_init);
+
+MODULE_DESCRIPTION("ipc logging");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
new file mode 100644
index 000000000000..a54538798f2b
--- /dev/null
+++ b/kernel/trace/ipc_logging_debug.c
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+static DEFINE_MUTEX(ipc_log_debugfs_init_lock);
+static struct dentry *root_dent;
+
+static int debug_log(struct ipc_log_context *ilctxt,
+ char *buff, int size, int cont)
+{
+ int i = 0;
+ int ret;
+
+ if (size < MAX_MSG_DECODED_SIZE) {
+ pr_err("%s: buffer size %d < %d\n", __func__, size,
+ MAX_MSG_DECODED_SIZE);
+ return -ENOMEM;
+ }
+ do {
+ i = ipc_log_extract(ilctxt, buff, size - 1);
+ if (cont && i == 0) {
+ ret = wait_for_completion_interruptible(
+ &ilctxt->read_avail);
+ if (ret < 0)
+ return ret;
+ }
+ } while (cont && i == 0);
+
+ return i;
+}
+
+/*
+ * VFS read helper which extracts log data from the ipc_log_context
+ * stored in file->private_data.
+ *
+ * @file File structure
+ * @buff user buffer
+ * @count size of user buffer
+ * @ppos file position to read from (only a value of 0 is accepted)
+ * @cont 1 = continuous mode (don't return 0 to signal end-of-file)
+ *
+ * @returns ==0 end of file
+ * >0 number of bytes read
+ * <0 error
+ */
+static ssize_t debug_read_helper(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos, int cont)
+{
+ struct ipc_log_context *ilctxt = file->private_data;
+ char *buffer;
+ int bsize;
+
+ buffer = kmalloc(count, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ bsize = debug_log(ilctxt, buffer, count, cont);
+ if (bsize > 0) {
+ if (copy_to_user(buff, buffer, bsize)) {
+ kfree(buffer);
+ return -EFAULT;
+ }
+ *ppos += bsize;
+ }
+ kfree(buffer);
+ return bsize;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ return debug_read_helper(file, buff, count, ppos, 0);
+}
+
+static ssize_t debug_read_cont(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ return debug_read_helper(file, buff, count, ppos, 1);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static const struct file_operations debug_ops_cont = {
+ .read = debug_read_cont,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ struct ipc_log_context *ilctxt,
+ const struct file_operations *fops)
+{
+ debugfs_create_file(name, mode, dent, ilctxt, fops);
+}
+
+static void dfunc_string(struct encode_context *ectxt,
+ struct decode_context *dctxt)
+{
+ tsv_timestamp_read(ectxt, dctxt, "");
+ tsv_qtimer_read(ectxt, dctxt, " ");
+ tsv_byte_array_read(ectxt, dctxt, "");
+
+ /* add trailing \n if necessary */
+ if (*(dctxt->buff - 1) != '\n') {
+ if (dctxt->size) {
+ ++dctxt->buff;
+ --dctxt->size;
+ }
+ *(dctxt->buff - 1) = '\n';
+ }
+}
+
+void check_and_create_debugfs(void)
+{
+ mutex_lock(&ipc_log_debugfs_init_lock);
+ if (!root_dent) {
+ root_dent = debugfs_create_dir("ipc_logging", 0);
+
+ if (IS_ERR(root_dent)) {
+ pr_err("%s: unable to create debugfs %ld\n",
+ __func__, PTR_ERR(root_dent));
+ root_dent = NULL;
+ }
+ }
+ mutex_unlock(&ipc_log_debugfs_init_lock);
+}
+EXPORT_SYMBOL(check_and_create_debugfs);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+ const char *mod_name)
+{
+ if (!root_dent)
+ check_and_create_debugfs();
+
+ if (root_dent) {
+ ctxt->dent = debugfs_create_dir(mod_name, root_dent);
+ if (!IS_ERR(ctxt->dent)) {
+ debug_create("log", 0444, ctxt->dent,
+ ctxt, &debug_ops);
+ debug_create("log_cont", 0444, ctxt->dent,
+ ctxt, &debug_ops_cont);
+ }
+ }
+ add_deserialization_func((void *)ctxt,
+ TSV_TYPE_STRING, dfunc_string);
+}
+EXPORT_SYMBOL(create_ctx_debugfs);
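+
+/*
+ * Illustrative only (not part of this patch): with debugfs mounted at
+ * /sys/kernel/debug, a context created with mod_name "my_driver" can be
+ * read back with:
+ *
+ *	cat /sys/kernel/debug/ipc_logging/my_driver/log
+ *
+ * The "log_cont" node blocks and keeps streaming messages as they arrive.
+ */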
diff --git a/kernel/trace/ipc_logging_private.h b/kernel/trace/ipc_logging_private.h
new file mode 100644
index 000000000000..3ac950695086
--- /dev/null
+++ b/kernel/trace/ipc_logging_private.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _IPC_LOGGING_PRIVATE_H
+#define _IPC_LOGGING_PRIVATE_H
+
+#include <linux/ipc_logging.h>
+
+#define IPC_LOG_VERSION 0x0003
+#define IPC_LOG_MAX_CONTEXT_NAME_LEN 32
+
+/**
+ * struct ipc_log_page_header - Individual log page header
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @page_num: Index of page (0.. N - 1) (note top bit is always set)
+ * @read_offset: Read offset in page
+ * @write_offset: Write offset in page (or 0xFFFF if full)
+ * @log_id: ID of logging context that owns this page
+ * @start_time: Scheduler clock for first write time in page
+ * @end_time: Scheduler clock for last write time in page
+ * @ctx_offset: Signed offset from page to the logging context. Used to
+ * optimize ram-dump extraction.
+ *
+ * @list: Linked list of pages that make up a log
+ * @nd_read_offset: Non-destructive read offset used for debugfs
+ *
+ * The first part of the structure defines data that is used to extract the
+ * logs from a memory dump and elements in this section should not be changed
+ * or re-ordered. New local data structures can be added to the end of the
+ * structure since they will be ignored by the extraction tool.
+ */
+struct ipc_log_page_header {
+ uint32_t magic;
+ uint32_t nmagic;
+ uint32_t page_num;
+ uint16_t read_offset;
+ uint16_t write_offset;
+ uint64_t log_id;
+ uint64_t start_time;
+ uint64_t end_time;
+ int64_t ctx_offset;
+
+ /* add local data structures after this point */
+ struct list_head list;
+ uint16_t nd_read_offset;
+};
+
+/**
+ * struct ipc_log_page - Individual log page
+ *
+ * @hdr: Log page header
+ * @data: Log data
+ *
+ * Each log consists of 1 to N log pages. Data size is adjusted to always fit
+ * the structure into a single kernel page.
+ */
+struct ipc_log_page {
+ struct ipc_log_page_header hdr;
+ char data[PAGE_SIZE - sizeof(struct ipc_log_page_header)];
+};
+
+/**
+ * struct ipc_log_context - main logging context
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @version: IPC Logging version of log format
+ * @user_version: Version number for user-defined messages
+ * @header_size: Size of the log header which is used to determine the offset
+ * of ipc_log_page::data
+ * @log_id: Log ID (assigned when log is created)
+ * @name: Name of the log used to uniquely identify the log during extraction
+ *
+ * @list: List of log contexts (struct ipc_log_context)
+ * @page_list: List of log pages (struct ipc_log_page)
+ * @first_page: First page in list of logging pages
+ * @last_page: Last page in list of logging pages
+ * @write_page: Current write page
+ * @read_page: Current read page (for internal reads)
+ * @nd_read_page: Current debugfs extraction page (non-destructive)
+ *
+ * @write_avail: Number of bytes available to write in all pages
+ * @dent: Debugfs node for run-time log extraction
+ * @dfunc_info_list: List of deserialization functions
+ * @context_lock_lhb1: Lock for entire structure
+ * @read_avail: Completed when new data is added to the log
+ */
+struct ipc_log_context {
+ uint32_t magic;
+ uint32_t nmagic;
+ uint32_t version;
+ uint16_t user_version;
+ uint16_t header_size;
+ uint64_t log_id;
+ char name[IPC_LOG_MAX_CONTEXT_NAME_LEN];
+
+ /* add local data structures after this point */
+ struct list_head list;
+ struct list_head page_list;
+ struct ipc_log_page *first_page;
+ struct ipc_log_page *last_page;
+ struct ipc_log_page *write_page;
+ struct ipc_log_page *read_page;
+ struct ipc_log_page *nd_read_page;
+
+ uint32_t write_avail;
+ struct dentry *dent;
+ struct list_head dfunc_info_list;
+ spinlock_t context_lock_lhb1;
+ struct completion read_avail;
+};
+
+struct dfunc_info {
+ struct list_head list;
+ int type;
+ void (*dfunc) (struct encode_context *, struct decode_context *);
+};
+
+enum {
+ TSV_TYPE_INVALID,
+ TSV_TYPE_TIMESTAMP,
+ TSV_TYPE_POINTER,
+ TSV_TYPE_INT32,
+ TSV_TYPE_BYTE_ARRAY,
+ TSV_TYPE_QTIMER,
+};
+
+enum {
+ OUTPUT_DEBUGFS,
+};
+
+#define IPC_LOG_CONTEXT_MAGIC_NUM 0x25874452
+#define IPC_LOGGING_MAGIC_NUM 0x52784425
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define IS_MSG_TYPE(x) (((x) > TSV_TYPE_MSG_START) && \
+ ((x) < TSV_TYPE_MSG_END))
+#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+
+#if (defined(CONFIG_DEBUG_FS))
+void check_and_create_debugfs(void);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+ const char *mod_name);
+#else
+void check_and_create_debugfs(void)
+{
+}
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt, const char *mod_name)
+{
+}
+#endif
+
+#endif
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
new file mode 100644
index 000000000000..ba609d5eb07f
--- /dev/null
+++ b/kernel/trace/msm_rtb.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <asm-generic/sizes.h>
+#include <linux/msm_rtb.h>
+
+#define SENTINEL_BYTE_1 0xFF
+#define SENTINEL_BYTE_2 0xAA
+#define SENTINEL_BYTE_3 0xFF
+
+#define RTB_COMPAT_STR "qcom,msm-rtb"
+
+/* Write
+ * 1) 3 bytes sentinel
+ * 2) 1 byte of log type
+ * 3) 8 bytes of where the caller came from
+ * 4) 4 bytes index
+ * 5) 8 bytes extra data from the caller
+ * 6) 8 bytes of timestamp
+ *
+ * Total = 32 bytes.
+ */
+struct msm_rtb_layout {
+ unsigned char sentinel[3];
+ unsigned char log_type;
+ uint32_t idx;
+ uint64_t caller;
+ uint64_t data;
+ uint64_t timestamp;
+} __attribute__ ((__packed__));
+
+
+struct msm_rtb_state {
+ struct msm_rtb_layout *rtb;
+ phys_addr_t phys;
+ int nentries;
+ int size;
+ int enabled;
+ int initialized;
+ uint32_t filter;
+ int step_size;
+};
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu);
+#else
+static atomic_t msm_rtb_idx;
+#endif
+
+static struct msm_rtb_state msm_rtb = {
+ .filter = 1 << LOGK_LOGBUF,
+ .enabled = 1,
+};
+
+module_param_named(filter, msm_rtb.filter, uint, 0644);
+module_param_named(enable, msm_rtb.enabled, int, 0644);
+
+static int msm_rtb_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ msm_rtb.enabled = 0;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_rtb_panic_blk = {
+ .notifier_call = msm_rtb_panic_notifier,
+ .priority = INT_MAX,
+};
+
+int notrace msm_rtb_event_should_log(enum logk_event_type log_type)
+{
+ return msm_rtb.initialized && msm_rtb.enabled &&
+ ((1 << (log_type & ~LOGTYPE_NOPC)) & msm_rtb.filter);
+}
+EXPORT_SYMBOL(msm_rtb_event_should_log);
+
+static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start)
+{
+ start->sentinel[0] = SENTINEL_BYTE_1;
+ start->sentinel[1] = SENTINEL_BYTE_2;
+ start->sentinel[2] = SENTINEL_BYTE_3;
+}
+
+static void msm_rtb_write_type(enum logk_event_type log_type,
+ struct msm_rtb_layout *start)
+{
+ start->log_type = (char)log_type;
+}
+
+static void msm_rtb_write_caller(uint64_t caller, struct msm_rtb_layout *start)
+{
+ start->caller = caller;
+}
+
+static void msm_rtb_write_idx(uint32_t idx,
+ struct msm_rtb_layout *start)
+{
+ start->idx = idx;
+}
+
+static void msm_rtb_write_data(uint64_t data, struct msm_rtb_layout *start)
+{
+ start->data = data;
+}
+
+static void msm_rtb_write_timestamp(struct msm_rtb_layout *start)
+{
+ start->timestamp = sched_clock();
+}
+
+static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller,
+ uint64_t data, int idx)
+{
+ struct msm_rtb_layout *start;
+
+ start = &msm_rtb.rtb[idx & (msm_rtb.nentries - 1)];
+
+ msm_rtb_emit_sentinel(start);
+ msm_rtb_write_type(log_type, start);
+ msm_rtb_write_caller(caller, start);
+ msm_rtb_write_idx(idx, start);
+ msm_rtb_write_data(data, start);
+ msm_rtb_write_timestamp(start);
+ mb();
+
+ return;
+}
+
+static void uncached_logk_timestamp(int idx)
+{
+ unsigned long long timestamp;
+
+ timestamp = sched_clock();
+ uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC,
+ (uint64_t)lower_32_bits(timestamp),
+ (uint64_t)upper_32_bits(timestamp), idx);
+}
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+static int msm_rtb_get_idx(void)
+{
+ int cpu, i, offset;
+ atomic_t *index;
+
+ /*
+ * ideally we would use get_cpu but this is a close enough
+ * approximation for our purposes.
+ */
+ cpu = raw_smp_processor_id();
+
+ index = &per_cpu(msm_rtb_idx_cpu, cpu);
+
+ i = atomic_add_return(msm_rtb.step_size, index);
+ i -= msm_rtb.step_size;
+
+ /* Check if index has wrapped around */
+ offset = (i & (msm_rtb.nentries - 1)) -
+ ((i - msm_rtb.step_size) & (msm_rtb.nentries - 1));
+ if (offset < 0) {
+ uncached_logk_timestamp(i);
+ i = atomic_add_return(msm_rtb.step_size, index);
+ i -= msm_rtb.step_size;
+ }
+
+ return i;
+}
+#else
+static int msm_rtb_get_idx(void)
+{
+ int i, offset;
+
+ i = atomic_inc_return(&msm_rtb_idx);
+ i--;
+
+ /* Check if index has wrapped around */
+ offset = (i & (msm_rtb.nentries - 1)) -
+ ((i - 1) & (msm_rtb.nentries - 1));
+ if (offset < 0) {
+ uncached_logk_timestamp(i);
+ i = atomic_inc_return(&msm_rtb_idx);
+ i--;
+ }
+
+ return i;
+}
+#endif
+
+int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller,
+ void *data)
+{
+ int i;
+
+ if (!msm_rtb_event_should_log(log_type))
+ return 0;
+
+ i = msm_rtb_get_idx();
+ uncached_logk_pc_idx(log_type, (uint64_t)((unsigned long) caller),
+ (uint64_t)((unsigned long) data), i);
+
+ return 1;
+}
+EXPORT_SYMBOL(uncached_logk_pc);
+
+noinline int notrace uncached_logk(enum logk_event_type log_type, void *data)
+{
+ return uncached_logk_pc(log_type, __builtin_return_address(0), data);
+}
+EXPORT_SYMBOL(uncached_logk);
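+
+/*
+ * Illustrative sketch (not part of this patch): callers record one event
+ * type plus one word of data per entry; the event must be enabled in the
+ * filter mask (only LOGK_LOGBUF is enabled by default above).  some_value
+ * is a hypothetical datum of interest.
+ *
+ *	uncached_logk(LOGK_LOGBUF, (void *)some_value);
+ */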
+
+static int msm_rtb_probe(struct platform_device *pdev)
+{
+ struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+ unsigned int cpu;
+#endif
+ int ret;
+
+ if (!pdev->dev.of_node) {
+ msm_rtb.size = d->size;
+ } else {
+ u64 size;
+ struct device_node *pnode;
+
+ pnode = of_parse_phandle(pdev->dev.of_node,
+ "linux,contiguous-region", 0);
+ if (pnode != NULL) {
+ const u32 *addr;
+
+ addr = of_get_address(pnode, 0, &size, NULL);
+ if (!addr) {
+ of_node_put(pnode);
+ return -EINVAL;
+ }
+ of_node_put(pnode);
+ } else {
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "qcom,rtb-size",
+ (u32 *)&size);
+ if (ret < 0)
+ return ret;
+
+ }
+
+ msm_rtb.size = size;
+ }
+
+ if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
+ return -EINVAL;
+
+ msm_rtb.rtb = dma_alloc_coherent(&pdev->dev, msm_rtb.size,
+ &msm_rtb.phys,
+ GFP_KERNEL);
+
+ if (!msm_rtb.rtb)
+ return -ENOMEM;
+
+ msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout);
+
+ /* Round this down to a power of 2 */
+ msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries);
+
+ memset(msm_rtb.rtb, 0, msm_rtb.size);
+
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+ for_each_possible_cpu(cpu) {
+ atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu);
+ atomic_set(a, cpu);
+ }
+ msm_rtb.step_size = num_possible_cpus();
+#else
+ atomic_set(&msm_rtb_idx, 0);
+ msm_rtb.step_size = 1;
+#endif
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &msm_rtb_panic_blk);
+ msm_rtb.initialized = 1;
+ return 0;
+}
+
+static struct of_device_id msm_match_table[] = {
+ {.compatible = RTB_COMPAT_STR},
+ {},
+};
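+
+/*
+ * Illustrative only (not part of this patch): a matching devicetree node
+ * could look roughly like this; the 1 MiB size is an arbitrary example and
+ * must not exceed SZ_1M.
+ *
+ *	rtb {
+ *		compatible = "qcom,msm-rtb";
+ *		qcom,rtb-size = <0x100000>;
+ *	};
+ */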
+
+static struct platform_driver msm_rtb_driver = {
+ .driver = {
+ .name = "msm_rtb",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_match_table
+ },
+};
+
+static int __init msm_rtb_init(void)
+{
+ return platform_driver_probe(&msm_rtb_driver, msm_rtb_probe);
+}
+
+static void __exit msm_rtb_exit(void)
+{
+ platform_driver_unregister(&msm_rtb_driver);
+}
+module_init(msm_rtb_init)
+module_exit(msm_rtb_exit)
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index eb4220a132ec..9270e1ac6460 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -15,4 +15,5 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
-
+EXPORT_TRACEPOINT_SYMBOL(core_ctl_set_busy);
+EXPORT_TRACEPOINT_SYMBOL(core_ctl_eval_need);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dd4d86ae8e21..ae68222c5a74 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -41,6 +41,7 @@
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>
+#include <linux/coresight-stm.h>
#include "trace.h"
#include "trace_output.h"
@@ -573,8 +574,11 @@ int __trace_puts(unsigned long ip, const char *str, int size)
if (entry->buf[size - 1] != '\n') {
entry->buf[size] = '\n';
entry->buf[size + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 2);
+ } else {
entry->buf[size] = '\0';
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, size + 1);
+ }
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -615,6 +619,7 @@ int __trace_bputs(unsigned long ip, const char *str)
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->str = str;
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->str, strlen(entry->str)+1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
@@ -2240,6 +2245,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
+ stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
__buffer_unlock_commit(buffer, event);
ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
}
@@ -5268,8 +5274,11 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
- } else
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+ } else {
entry->buf[cnt] = '\0';
+ stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+ }
__buffer_unlock_commit(buffer, event);
diff --git a/kernel/trace/trace_cpu_freq_switch.c b/kernel/trace/trace_cpu_freq_switch.c
new file mode 100644
index 000000000000..f9dab6c4bb72
--- /dev/null
+++ b/kernel/trace/trace_cpu_freq_switch.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hrtimer.h>
+#include <linux/tracefs.h>
+#include <linux/ktime.h>
+#include <trace/events/power.h>
+#include "trace_stat.h"
+#include "trace.h"
+
+struct trans {
+ struct rb_node node;
+ unsigned int cpu;
+ unsigned int start_freq;
+ unsigned int end_freq;
+ unsigned int min_us;
+ unsigned int max_us;
+ ktime_t total_t;
+ unsigned int count;
+};
+static struct rb_root freq_trans_tree = RB_ROOT;
+
+static struct trans *tr_search(struct rb_root *root, unsigned int cpu,
+ unsigned int start_freq, unsigned int end_freq)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct trans *tr = container_of(node, struct trans, node);
+
+ if (cpu < tr->cpu)
+ node = node->rb_left;
+ else if (cpu > tr->cpu)
+ node = node->rb_right;
+ else if (start_freq < tr->start_freq)
+ node = node->rb_left;
+ else if (start_freq > tr->start_freq)
+ node = node->rb_right;
+ else if (end_freq < tr->end_freq)
+ node = node->rb_left;
+ else if (end_freq > tr->end_freq)
+ node = node->rb_right;
+ else
+ return tr;
+ }
+ return NULL;
+}
+
+static int tr_insert(struct rb_root *root, struct trans *tr)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct trans *this = container_of(*new, struct trans, node);
+
+ parent = *new;
+ if (tr->cpu < this->cpu)
+ new = &((*new)->rb_left);
+ else if (tr->cpu > this->cpu)
+ new = &((*new)->rb_right);
+ else if (tr->start_freq < this->start_freq)
+ new = &((*new)->rb_left);
+ else if (tr->start_freq > this->start_freq)
+ new = &((*new)->rb_right);
+ else if (tr->end_freq < this->end_freq)
+ new = &((*new)->rb_left);
+ else if (tr->end_freq > this->end_freq)
+ new = &((*new)->rb_right);
+ else
+ return -EINVAL;
+ }
+
+ rb_link_node(&tr->node, parent, new);
+ rb_insert_color(&tr->node, root);
+
+ return 0;
+}
+
+struct trans_state {
+ spinlock_t lock;
+ unsigned int start_freq;
+ unsigned int end_freq;
+ ktime_t start_t;
+ bool started;
+};
+static DEFINE_PER_CPU(struct trans_state, freq_trans_state);
+
+static DEFINE_SPINLOCK(state_lock);
+
+static void probe_start(void *ignore, unsigned int start_freq,
+ unsigned int end_freq, unsigned int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ per_cpu(freq_trans_state, cpu).start_freq = start_freq;
+ per_cpu(freq_trans_state, cpu).end_freq = end_freq;
+ per_cpu(freq_trans_state, cpu).start_t = ktime_get();
+ per_cpu(freq_trans_state, cpu).started = true;
+ spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void probe_end(void *ignore, unsigned int cpu)
+{
+ unsigned long flags;
+ struct trans *tr;
+ s64 dur_us;
+ ktime_t dur_t, end_t = ktime_get();
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (!per_cpu(freq_trans_state, cpu).started)
+ goto out;
+
+ dur_t = ktime_sub(end_t, per_cpu(freq_trans_state, cpu).start_t);
+ dur_us = ktime_to_us(dur_t);
+
+ tr = tr_search(&freq_trans_tree, cpu,
+ per_cpu(freq_trans_state, cpu).start_freq,
+ per_cpu(freq_trans_state, cpu).end_freq);
+ if (!tr) {
+ tr = kzalloc(sizeof(*tr), GFP_ATOMIC);
+ if (!tr) {
+ WARN_ONCE(1, "CPU frequency trace is now invalid!\n");
+ goto out;
+ }
+
+ tr->start_freq = per_cpu(freq_trans_state, cpu).start_freq;
+ tr->end_freq = per_cpu(freq_trans_state, cpu).end_freq;
+ tr->cpu = cpu;
+ tr->min_us = UINT_MAX;
+ tr_insert(&freq_trans_tree, tr);
+ }
+ tr->total_t = ktime_add(tr->total_t, dur_t);
+ tr->count++;
+
+ if (dur_us > tr->max_us)
+ tr->max_us = dur_us;
+ if (dur_us < tr->min_us)
+ tr->min_us = dur_us;
+
+ per_cpu(freq_trans_state, cpu).started = false;
+out:
+ spin_unlock_irqrestore(&state_lock, flags);
+}
+
+static void *freq_switch_stat_start(struct tracer_stat *trace)
+{
+ struct rb_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ n = rb_first(&freq_trans_tree);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return n;
+}
+
+static void *freq_switch_stat_next(void *prev, int idx)
+{
+ struct rb_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ n = rb_next(prev);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return n;
+}
+
+static int freq_switch_stat_show(struct seq_file *s, void *p)
+{
+ unsigned long flags;
+ struct trans *tr = p;
+
+ spin_lock_irqsave(&state_lock, flags);
+ seq_printf(s, "%3d %9d %8d %5d %6lld %6d %6d\n", tr->cpu,
+ tr->start_freq, tr->end_freq, tr->count,
+ div_s64(ktime_to_us(tr->total_t), tr->count),
+ tr->min_us, tr->max_us);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+}
+
+static void freq_switch_stat_release(void *stat)
+{
+ struct trans *tr = stat;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ rb_erase(&tr->node, &freq_trans_tree);
+ spin_unlock_irqrestore(&state_lock, flags);
+ kfree(tr);
+}
+
+static int freq_switch_stat_headers(struct seq_file *s)
+{
+ seq_puts(s, "CPU START_KHZ END_KHZ COUNT AVG_US MIN_US MAX_US\n");
+ seq_puts(s, " | | | | | | |\n");
+ return 0;
+}
+
+struct tracer_stat freq_switch_stats __read_mostly = {
+ .name = "cpu_freq_switch",
+ .stat_start = freq_switch_stat_start,
+ .stat_next = freq_switch_stat_next,
+ .stat_show = freq_switch_stat_show,
+ .stat_release = freq_switch_stat_release,
+ .stat_headers = freq_switch_stat_headers
+};
+
+static void trace_freq_switch_disable(void)
+{
+ unregister_stat_tracer(&freq_switch_stats);
+ unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+ unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+ pr_info("disabled cpu frequency switch time profiling\n");
+}
+
+static int trace_freq_switch_enable(void)
+{
+ int ret;
+
+ ret = register_trace_cpu_frequency_switch_start(probe_start, NULL);
+ if (ret)
+ goto out;
+
+ ret = register_trace_cpu_frequency_switch_end(probe_end, NULL);
+ if (ret)
+ goto err_register_switch_end;
+
+ ret = register_stat_tracer(&freq_switch_stats);
+ if (ret)
+ goto err_register_stat_tracer;
+
+ pr_info("enabled cpu frequency switch time profiling\n");
+ return 0;
+
+err_register_stat_tracer:
+ unregister_trace_cpu_frequency_switch_end(probe_end, NULL);
+err_register_switch_end:
+	unregister_trace_cpu_frequency_switch_start(probe_start, NULL);
+out:
+ pr_err("failed to enable cpu frequency switch time profiling\n");
+
+ return ret;
+}
+
+static DEFINE_MUTEX(debugfs_lock);
+static bool trace_freq_switch_enabled;
+
+static int debug_toggle_tracing(void *data, u64 val)
+{
+ int ret = 0;
+
+ mutex_lock(&debugfs_lock);
+
+ if (val == 1 && !trace_freq_switch_enabled)
+ ret = trace_freq_switch_enable();
+ else if (val == 0 && trace_freq_switch_enabled)
+ trace_freq_switch_disable();
+ else if (val > 1)
+ ret = -EINVAL;
+
+ if (!ret)
+ trace_freq_switch_enabled = val;
+
+ mutex_unlock(&debugfs_lock);
+
+ return ret;
+}
+
+static int debug_tracing_state_get(void *data, u64 *val)
+{
+ mutex_lock(&debugfs_lock);
+ *val = trace_freq_switch_enabled;
+ mutex_unlock(&debugfs_lock);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debug_tracing_state_fops, debug_tracing_state_get,
+ debug_toggle_tracing, "%llu\n");
+
+static int __init trace_freq_switch_init(void)
+{
+ struct dentry *d_tracer = tracing_init_dentry();
+
+ if (IS_ERR(d_tracer))
+ return 0;
+
+ tracefs_create_file("cpu_freq_switch_profile_enabled",
+ S_IRUGO | S_IWUSR, d_tracer, NULL, &debug_tracing_state_fops);
+
+ return 0;
+}
+late_initcall(trace_freq_switch_init);
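+
+/*
+ * Illustrative only (not part of this patch): assuming tracefs is mounted
+ * at /sys/kernel/debug/tracing, profiling is toggled through the file
+ * created above and the per-transition statistics are read back through
+ * the registered stat tracer:
+ *
+ *	echo 1 > /sys/kernel/debug/tracing/cpu_freq_switch_profile_enabled
+ *	cat /sys/kernel/debug/tracing/trace_stat/cpu_freq_switch
+ */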
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index cc9f7a9319be..731f6484b811 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -256,7 +256,8 @@ int perf_trace_add(struct perf_event *p_event, int flags)
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
- hlist_del_rcu(&p_event->hlist_entry);
+ if (!hlist_unhashed(&p_event->hlist_entry))
+ hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d202d991edae..fda3b6e1b3a0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -287,14 +287,15 @@ static void output_printk(struct trace_event_buffer *fbuffer)
spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+ unsigned long len)
{
if (tracepoint_printk)
output_printk(fbuffer);
event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
- fbuffer->flags, fbuffer->pc);
+ fbuffer->flags, fbuffer->pc, len);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9d4399b553a3..78f04e4ad829 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -359,7 +359,8 @@ static bool report_latency(struct trace_array *tr, cycle_t delta)
}
static void
-probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
+probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu,
+ unsigned int load)
{
if (task != wakeup_task)
return;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 0655afbea83f..a01740a98afa 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -336,7 +336,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -382,7 +382,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
entry->ret = syscall_get_return_value(current, regs);
event_trigger_unlock_commit(trace_file, buffer, event, entry,
- irq_flags, pc);
+ irq_flags, pc, 0);
}
static int reg_event_syscall_enter(struct trace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d2f6d0be3503..23515a716748 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -821,7 +821,7 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
memcpy(data, ucb->buf, tu->tp.size + dsize);
- event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+ event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0, 0);
}
/* uprobe handler */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1f1b05f5a94b..029da92fb712 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -20,6 +20,7 @@
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
+#include <linux/workqueue.h>
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
@@ -119,7 +120,9 @@ static unsigned long soft_lockup_nmi_warn;
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static unsigned long __maybe_unused hardlockup_allcpu_dumped;
+#endif
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -230,7 +233,15 @@ static void __touch_watchdog(void)
__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
-void touch_softlockup_watchdog(void)
+/**
+ * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
+ *
+ * Call when the scheduler may have stalled for legitimate reasons
+ * preventing the watchdog task from executing - e.g. the scheduler
+ * entering idle state. This should only be used for scheduler events.
+ * Use touch_softlockup_watchdog() for everything else.
+ */
+void touch_softlockup_watchdog_sched(void)
{
/*
* Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -238,6 +249,12 @@ void touch_softlockup_watchdog(void)
*/
raw_cpu_write(watchdog_touch_ts, 0);
}
+
+void touch_softlockup_watchdog(void)
+{
+ touch_softlockup_watchdog_sched();
+ wq_watchdog_touch(raw_smp_processor_id());
+}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
@@ -251,6 +268,7 @@ void touch_all_softlockup_watchdogs(void)
*/
for_each_watchdog_cpu(cpu)
per_cpu(watchdog_touch_ts, cpu) = 0;
+ wq_watchdog_touch(-1);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 450c21fd0e6e..ef84d9874d03 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,6 +48,7 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
+#include <linux/bug.h>
#include "workqueue_internal.h"
@@ -148,6 +149,8 @@ struct worker_pool {
int id; /* I: pool ID */
unsigned int flags; /* X: flags */
+ unsigned long watchdog_ts; /* L: watchdog timestamp */
+
struct list_head worklist; /* L: list of pending works */
int nr_workers; /* L: total number of workers */
@@ -1093,6 +1096,8 @@ static void pwq_activate_delayed_work(struct work_struct *work)
struct pool_workqueue *pwq = get_work_pwq(work);
trace_workqueue_activate_work(work);
+ if (list_empty(&pwq->pool->worklist))
+ pwq->pool->watchdog_ts = jiffies;
move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
pwq->nr_active++;
@@ -1395,6 +1400,8 @@ retry:
trace_workqueue_activate_work(work);
pwq->nr_active++;
worklist = &pwq->pool->worklist;
+ if (list_empty(worklist))
+ pwq->pool->watchdog_ts = jiffies;
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &pwq->delayed_works;
@@ -2052,6 +2059,7 @@ __acquires(&pool->lock)
current->comm, preempt_count(), task_pid_nr(current),
worker->current_func);
debug_show_held_locks(current);
+ BUG_ON(PANIC_CORRUPTION);
dump_stack();
}
@@ -2167,6 +2175,8 @@ recheck:
list_first_entry(&pool->worklist,
struct work_struct, entry);
+ pool->watchdog_ts = jiffies;
+
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
/* optimization path, not strictly necessary */
process_one_work(worker, work);
@@ -2250,6 +2260,7 @@ repeat:
struct pool_workqueue, mayday_node);
struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n;
+ bool first = true;
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
@@ -2266,9 +2277,14 @@ repeat:
* process'em.
*/
WARN_ON_ONCE(!list_empty(scheduled));
- list_for_each_entry_safe(work, n, &pool->worklist, entry)
- if (get_work_pwq(work) == pwq)
+ list_for_each_entry_safe(work, n, &pool->worklist, entry) {
+ if (get_work_pwq(work) == pwq) {
+ if (first)
+ pool->watchdog_ts = jiffies;
move_linked_works(work, scheduled, &n);
+ }
+ first = false;
+ }
if (!list_empty(scheduled)) {
process_scheduled_works(rescuer);
@@ -3079,6 +3095,7 @@ static int init_worker_pool(struct worker_pool *pool)
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
pool->flags |= POOL_DISASSOCIATED;
+ pool->watchdog_ts = jiffies;
INIT_LIST_HEAD(&pool->worklist);
INIT_LIST_HEAD(&pool->idle_list);
hash_init(pool->busy_hash);
@@ -4318,7 +4335,9 @@ void show_workqueue_state(void)
pr_info("pool %d:", pool->id);
pr_cont_pool_info(pool);
- pr_cont(" workers=%d", pool->nr_workers);
+ pr_cont(" hung=%us workers=%d",
+ jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
+ pool->nr_workers);
if (pool->manager)
pr_cont(" manager: %d",
task_pid_nr(pool->manager->task));
@@ -5177,6 +5196,154 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { }
#endif /* CONFIG_SYSFS */
+/*
+ * Workqueue watchdog.
+ *
+ * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
+ * flush dependency, a concurrency managed work item which stays RUNNING
+ * indefinitely. Workqueue stalls can be very difficult to debug as the
+ * usual warning mechanisms don't trigger and internal workqueue state is
+ * largely opaque.
+ *
+ * Workqueue watchdog monitors all worker pools periodically and dumps
+ * state if some pools failed to make forward progress for a while where
+ * forward progress is defined as the first item on ->worklist changing.
+ *
+ * This mechanism is controlled through the kernel parameter
+ * "workqueue.watchdog_thresh" which can be updated at runtime through the
+ * corresponding sysfs parameter file.
+ */
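+
+/*
+ * Illustrative only (not part of this patch): the threshold (in seconds)
+ * can be set at boot with "workqueue.watchdog_thresh=60" on the kernel
+ * command line or at runtime via the module parameter; writing 0 disables
+ * the watchdog.
+ *
+ *	echo 60 > /sys/module/workqueue/parameters/watchdog_thresh
+ */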
+#ifdef CONFIG_WQ_WATCHDOG
+
+static void wq_watchdog_timer_fn(unsigned long data);
+
+static unsigned long wq_watchdog_thresh = 30;
+static struct timer_list wq_watchdog_timer =
+ TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
+
+static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
+static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
+
+static void wq_watchdog_reset_touched(void)
+{
+ int cpu;
+
+ wq_watchdog_touched = jiffies;
+ for_each_possible_cpu(cpu)
+ per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+}
+
+static void wq_watchdog_timer_fn(unsigned long data)
+{
+ unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
+ bool lockup_detected = false;
+ struct worker_pool *pool;
+ int pi;
+
+ if (!thresh)
+ return;
+
+ rcu_read_lock();
+
+ for_each_pool(pool, pi) {
+ unsigned long pool_ts, touched, ts;
+
+ if (list_empty(&pool->worklist))
+ continue;
+
+ /* get the latest of pool and touched timestamps */
+ pool_ts = READ_ONCE(pool->watchdog_ts);
+ touched = READ_ONCE(wq_watchdog_touched);
+
+ if (time_after(pool_ts, touched))
+ ts = pool_ts;
+ else
+ ts = touched;
+
+ if (pool->cpu >= 0) {
+ unsigned long cpu_touched =
+ READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
+ pool->cpu));
+ if (time_after(cpu_touched, ts))
+ ts = cpu_touched;
+ }
+
+ /* did we stall? */
+ if (time_after(jiffies, ts + thresh)) {
+ lockup_detected = true;
+ pr_emerg("BUG: workqueue lockup - pool");
+ pr_cont_pool_info(pool);
+ pr_cont(" stuck for %us!\n",
+ jiffies_to_msecs(jiffies - pool_ts) / 1000);
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (lockup_detected)
+ show_workqueue_state();
+
+ wq_watchdog_reset_touched();
+ mod_timer(&wq_watchdog_timer, jiffies + thresh);
+}
+
+void wq_watchdog_touch(int cpu)
+{
+ if (cpu >= 0)
+ per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
+ else
+ wq_watchdog_touched = jiffies;
+}
+
+static void wq_watchdog_set_thresh(unsigned long thresh)
+{
+ wq_watchdog_thresh = 0;
+ del_timer_sync(&wq_watchdog_timer);
+
+ if (thresh) {
+ wq_watchdog_thresh = thresh;
+ wq_watchdog_reset_touched();
+ mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
+ }
+}
+
+static int wq_watchdog_param_set_thresh(const char *val,
+ const struct kernel_param *kp)
+{
+ unsigned long thresh;
+ int ret;
+
+ ret = kstrtoul(val, 0, &thresh);
+ if (ret)
+ return ret;
+
+ if (system_wq)
+ wq_watchdog_set_thresh(thresh);
+ else
+ wq_watchdog_thresh = thresh;
+
+ return 0;
+}
+
+static const struct kernel_param_ops wq_watchdog_thresh_ops = {
+ .set = wq_watchdog_param_set_thresh,
+ .get = param_get_ulong,
+};
+
+module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
+ 0644);
+
+static void wq_watchdog_init(void)
+{
+ wq_watchdog_set_thresh(wq_watchdog_thresh);
+}
+
+#else /* CONFIG_WQ_WATCHDOG */
+
+static inline void wq_watchdog_init(void) { }
+
+#endif /* CONFIG_WQ_WATCHDOG */
+
static void __init wq_numa_init(void)
{
cpumask_var_t *tbl;
@@ -5300,6 +5467,9 @@ static int __init init_workqueues(void)
!system_unbound_wq || !system_freezable_wq ||
!system_power_efficient_wq ||
!system_freezable_power_efficient_wq);
+
+ wq_watchdog_init();
+
return 0;
}
early_initcall(init_workqueues);