From: Rafael J. Wysocki
Date: Fri, 12 Aug 2016 20:53:58 +0000 (+0200)
Subject: Merge branches 'pm-sleep' and 'pm-cpufreq'
X-Git-Url: http://drtracing.org/?a=commitdiff_plain;h=0aeeb3e73f5fd7faef3dce8cb590effd6ee51571;hp=-c;p=deliverable%2Flinux.git

Merge branches 'pm-sleep' and 'pm-cpufreq'

* pm-sleep:
  PM / hibernate: Restore processor state before using per-CPU variables
  x86/power/64: Always create temporary identity mapping correctly

* pm-cpufreq:
  cpufreq: powernv: Fix crash in gpstate_timer_handler()
---

0aeeb3e73f5fd7faef3dce8cb590effd6ee51571
diff --combined drivers/cpufreq/powernv-cpufreq.c
index 87796e0864e9,54c45368e3f1,2fcc879d2b97..d3ffde806629
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@@@ -64,14 -64,12 -64,14 +64,14 @@@@
 /**
  * struct global_pstate_info -	Per policy data structure to maintain history of
  *				global pstates
- * @highest_lpstate:		The local pstate from which we are ramping down
+ * @highest_lpstate_idx:	The local pstate index from which we are
+ *				ramping down
  * @elapsed_time:		Time in ms spent in ramping down from
- *				highest_lpstate
+ *				highest_lpstate_idx
  * @last_sampled_time:		Time from boot in ms when global pstates were
  *				last set
- * @last_lpstate,last_gpstate:	Last set values for local and global pstates
+ * @last_lpstate_idx,		Last set value of local pstate and global
+ * last_gpstate_idx		pstate in terms of cpufreq table index
  * @timer:			Is used for ramping down if cpu goes idle for
  *				a long time with global pstate held high
  * @gpstate_lock:		A spinlock to maintain synchronization between
@@@@ -79,11 -77,11 -79,11 +79,11 @@@@
  * governor's target_index calls
  */
 struct global_pstate_info {
-	int highest_lpstate;
+	int highest_lpstate_idx;
 	unsigned int elapsed_time;
 	unsigned int last_sampled_time;
-	int last_lpstate;
-	int last_gpstate;
+	int last_lpstate_idx;
+	int last_gpstate_idx;
 	spinlock_t gpstate_lock;
 	struct timer_list timer;
 };
@@@@ -126,47 -124,29 -126,66 +126,66 @@@@ static int nr_chips
 static DEFINE_PER_CPU(struct chip *, chip_info);

 /*
- * Note: The set of pstates consists of contiguous integers, the
- * smallest of which is indicated by powernv_pstate_info.min, the
- * largest of which is indicated by powernv_pstate_info.max.
+ * Note:
+ * The set of pstates consists of contiguous integers.
+ * powernv_pstate_info stores the index of the frequency table for
+ * max, min and nominal frequencies. It also stores the number of
+ * available frequencies.
  *
- * The nominal pstate is the highest non-turbo pstate in this
- * platform. This is indicated by powernv_pstate_info.nominal.
+ * powernv_pstate_info.nominal indicates the index to the highest
+ * non-turbo frequency.
  */
 static struct powernv_pstate_info {
-	int min;
-	int max;
-	int nominal;
-	int nr_pstates;
+	unsigned int min;
+	unsigned int max;
+	unsigned int nominal;
+	unsigned int nr_pstates;
 } powernv_pstate_info;

+/* Use the following functions for conversions between pstate_id and index */
+static inline int idx_to_pstate(unsigned int i)
+{
++	if (unlikely(i >= powernv_pstate_info.nr_pstates)) {
++		pr_warn_once("index %u is out of bounds\n", i);
++		return powernv_freqs[powernv_pstate_info.nominal].driver_data;
++	}
++
+	return powernv_freqs[i].driver_data;
+}
+
+static inline unsigned int pstate_to_idx(int pstate)
+{
++	int min = powernv_freqs[powernv_pstate_info.min].driver_data;
++	int max = powernv_freqs[powernv_pstate_info.max].driver_data;
++
++	if (min > 0) {
++		if (unlikely((pstate < max) || (pstate > min))) {
++			pr_warn_once("pstate %d is out of bounds\n", pstate);
++			return powernv_pstate_info.nominal;
++		}
++	} else {
++		if (unlikely((pstate > max) || (pstate < min))) {
++			pr_warn_once("pstate %d is out of bounds\n", pstate);
++			return powernv_pstate_info.nominal;
++		}
++	}
+	/*
+	 * abs() is deliberately used so that it works with
+	 * both monotonically increasing and decreasing
+	 * pstate values
+	 */
+	return abs(pstate - idx_to_pstate(powernv_pstate_info.max));
+}
+
 static inline void reset_gpstates(struct cpufreq_policy *policy)
 {
 	struct global_pstate_info *gpstates = policy->driver_data;

-	gpstates->highest_lpstate = 0;
+	gpstates->highest_lpstate_idx = 0;
 	gpstates->elapsed_time = 0;
 	gpstates->last_sampled_time = 0;
-	gpstates->last_lpstate = 0;
-	gpstates->last_gpstate = 0;
+	gpstates->last_lpstate_idx = 0;
+	gpstates->last_gpstate_idx = 0;
 }

 /*
@@@@ -176,10 -156,9 -195,10 +195,10 @@@@ static int init_powernv_pstates(void)
 {
 	struct device_node *power_mgt;
-	int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+	int i, nr_pstates = 0;
 	const __be32 *pstate_ids, *pstate_freqs;
 	u32 len_ids, len_freqs;
+	u32 pstate_min, pstate_max, pstate_nominal;

 	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
 	if (!power_mgt) {
@@@@ -229,7 -208,6 -248,7 +248,7 @@@@
 		return -ENODEV;
 	}

+	powernv_pstate_info.nr_pstates = nr_pstates;
 	pr_debug("NR PStates %d\n", nr_pstates);
 	for (i = 0; i < nr_pstates; i++) {
 		u32 id = be32_to_cpu(pstate_ids[i]);
@@@@ -238,17 -216,15 -257,17 +257,17 @@@@
 		pr_debug("PState id %d freq %d MHz\n", id, freq);
 		powernv_freqs[i].frequency = freq * 1000; /* kHz */
 		powernv_freqs[i].driver_data = id;
+
+		if (id == pstate_max)
+			powernv_pstate_info.max = i;
+		else if (id == pstate_nominal)
+			powernv_pstate_info.nominal = i;
+		else if (id == pstate_min)
+			powernv_pstate_info.min = i;
 	}
+
+	/* End of list marker entry */
 	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
-
-	powernv_pstate_info.min = pstate_min;
-	powernv_pstate_info.max = pstate_max;
-	powernv_pstate_info.nominal = pstate_nominal;
-	powernv_pstate_info.nr_pstates = nr_pstates;
-
 	return 0;
 }

@@@@ -257,12 -233,12 -276,12 +276,12 @@@@ static unsigned int pstate_id_to_freq(i
 {
 	int i;

-	i = powernv_pstate_info.max - pstate_id;
+	i = pstate_to_idx(pstate_id);
 	if (i >= powernv_pstate_info.nr_pstates || i < 0) {
 		pr_warn("PState id %d outside of PState table, "
 			"reporting nominal id %d instead\n",
-			pstate_id, powernv_pstate_info.nominal);
-		i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+			pstate_id, idx_to_pstate(powernv_pstate_info.nominal));
+		i = powernv_pstate_info.nominal;
 	}

 	return powernv_freqs[i].frequency;
@@@@ -276,7 -252,7 -295,7 +295,7 @@@@ static ssize_t cpuinfo_nominal_freq_sho
 					char *buf)
 {
 	return sprintf(buf, "%u\n",
-		pstate_id_to_freq(powernv_pstate_info.nominal));
+		powernv_freqs[powernv_pstate_info.nominal].frequency);
 }

 struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
@@@@ -450,7 -426,7 -469,7 +469,7 @@@@ static void set_pstate(void *data
  */
 static inline unsigned int get_nominal_index(void)
 {
-	return powernv_pstate_info.max - powernv_pstate_info.nominal;
+	return powernv_pstate_info.nominal;
 }

 static void powernv_cpufreq_throttle_check(void *data)
@@@@ -459,22 -435,20 -478,22 +478,22 @@@@
 	unsigned int cpu = smp_processor_id();
 	unsigned long pmsr;
 	int pmsr_pmax;
+	unsigned int pmsr_pmax_idx;

 	pmsr = get_pmspr(SPRN_PMSR);
 	chip = this_cpu_read(chip_info);

 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
-	if (pmsr_pmax != powernv_pstate_info.max) {
+	pmsr_pmax_idx = pstate_to_idx(pmsr_pmax);
+	if (pmsr_pmax_idx != powernv_pstate_info.max) {
 		if (chip->throttled)
 			goto next;
 		chip->throttled = true;
-		if (pmsr_pmax < powernv_pstate_info.nominal) {
-			pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+		if (pmsr_pmax_idx > powernv_pstate_info.nominal) {
+			pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n",
 				     cpu, chip->id, pmsr_pmax,
-				     powernv_pstate_info.nominal);
+				     idx_to_pstate(powernv_pstate_info.nominal));
 			chip->throttle_sub_turbo++;
 		} else {
 			chip->throttle_turbo++;
@@@@ -510,35 -484,34 -529,35 +529,35 @@@@ next

 /**
  * calc_global_pstate - Calculate global pstate
- * @elapsed_time:	Elapsed time in milliseconds
- * @local_pstate:	New local pstate
- * @highest_lpstate:	pstate from which its ramping down
+ * @elapsed_time:		Elapsed time in milliseconds
+ * @local_pstate_idx:		New local pstate
+ * @highest_lpstate_idx:	pstate from which it is ramping down
  *
  * Finds the appropriate global pstate based on the pstate from which it is
  * ramping down and the time elapsed in ramping down. It follows a quadratic
  * equation which ensures that the ramp down to pmin completes in 5 seconds.
  */
 static inline int calc_global_pstate(unsigned int elapsed_time,
-				     int highest_lpstate, int local_pstate)
+				     int highest_lpstate_idx,
+				     int local_pstate_idx)
 {
-	int pstate_diff;
+	int index_diff;

 	/*
 	 * Using ramp_down_percent we get the percentage of rampdown
 	 * that we are expecting to be dropping. Difference between
-	 * highest_lpstate and powernv_pstate_info.min will give a absolute
+	 * highest_lpstate_idx and powernv_pstate_info.min will give an absolute
 	 * number of how many pstates we will drop eventually by the end of
 	 * 5 seconds, then just scale it to get the number of pstates to be dropped.
 	 */
-	pstate_diff = ((int)ramp_down_percent(elapsed_time) *
-			(highest_lpstate - powernv_pstate_info.min)) / 100;
+	index_diff = ((int)ramp_down_percent(elapsed_time) *
+			(powernv_pstate_info.min - highest_lpstate_idx)) / 100;

 	/* Ensure that global pstate is >= local pstate */
-	if (highest_lpstate - pstate_diff < local_pstate)
-		return local_pstate;
+	if (highest_lpstate_idx + index_diff >= local_pstate_idx)
+		return local_pstate_idx;
 	else
-		return highest_lpstate - pstate_diff;
+		return highest_lpstate_idx + index_diff;
 }

 static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
@@@@ -557,7 -530,8 -576,8 +576,7 @@@@
 	else
 		timer_interval = GPSTATE_TIMER_INTERVAL;

--	mod_timer_pinned(&gpstates->timer, jiffies +
--			msecs_to_jiffies(timer_interval));
++	mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
 }

 /**
@@@@ -573,7 -547,7 -593,7 +592,7 @@@@ void gpstate_timer_handler(unsigned lon
 {
 	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
 	struct global_pstate_info *gpstates = policy->driver_data;
-	int gpstate_id;
+	int gpstate_idx;
 	unsigned int time_diff = jiffies_to_msecs(jiffies)
 					- gpstates->last_sampled_time;
 	struct powernv_smp_call_data freq_data;
@@@@ -583,29 -557,29 -603,29 +602,29 @@@@
 	gpstates->last_sampled_time += time_diff;
 	gpstates->elapsed_time += time_diff;
-	freq_data.pstate_id = gpstates->last_lpstate;
+	freq_data.pstate_id = idx_to_pstate(gpstates->last_lpstate_idx);

-	if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+	if ((gpstates->last_gpstate_idx == gpstates->last_lpstate_idx) ||
 	    (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
-		gpstate_id = freq_data.pstate_id;
+		gpstate_idx = pstate_to_idx(freq_data.pstate_id);
 		reset_gpstates(policy);
-		gpstates->highest_lpstate = freq_data.pstate_id;
+		gpstates->highest_lpstate_idx = gpstate_idx;
 	} else {
-		gpstate_id = calc_global_pstate(gpstates->elapsed_time,
-						gpstates->highest_lpstate,
-						freq_data.pstate_id);
+		gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+						 gpstates->highest_lpstate_idx,
-						 freq_data.pstate_id);
++						 gpstates->last_lpstate_idx);
 	}

 	/*
 	 * If local pstate is equal to global pstate, rampdown is over
 	 * So the timer is not required to be queued.
 	 */
-	if (gpstate_id != freq_data.pstate_id)
+	if (gpstate_idx != gpstates->last_lpstate_idx)
 		queue_gpstate_timer(gpstates);

-	freq_data.gpstate_id = gpstate_id;
-	gpstates->last_gpstate = freq_data.gpstate_id;
-	gpstates->last_lpstate = freq_data.pstate_id;
+	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
+	gpstates->last_gpstate_idx = pstate_to_idx(freq_data.gpstate_id);
+	gpstates->last_lpstate_idx = pstate_to_idx(freq_data.pstate_id);

 	spin_unlock(&gpstates->gpstate_lock);

@@@@ -622,7 -596,7 -642,7 +641,7 @@@@ static int powernv_cpufreq_target_index
 			       unsigned int new_index)
 {
 	struct powernv_smp_call_data freq_data;
-	unsigned int cur_msec, gpstate_id;
+	unsigned int cur_msec, gpstate_idx;
 	struct global_pstate_info *gpstates = policy->driver_data;

 	if (unlikely(rebooting) && new_index != get_nominal_index())
@@@@ -634,15 -608,15 -654,15 +653,15 @@@@
 	cur_msec = jiffies_to_msecs(get_jiffies_64());

 	spin_lock(&gpstates->gpstate_lock);
-	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+	freq_data.pstate_id = idx_to_pstate(new_index);

 	if (!gpstates->last_sampled_time) {
-		gpstate_id = freq_data.pstate_id;
-		gpstates->highest_lpstate = freq_data.pstate_id;
+		gpstate_idx = new_index;
+		gpstates->highest_lpstate_idx = new_index;
 		goto gpstates_done;
 	}

-	if (gpstates->last_gpstate > freq_data.pstate_id) {
+	if (gpstates->last_gpstate_idx < new_index) {
 		gpstates->elapsed_time += cur_msec -
 						 gpstates->last_sampled_time;

@@@@ -653,34 -627,34 -673,34 +672,34 @@@@
 		 */
 		if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
 			reset_gpstates(policy);
-			gpstates->highest_lpstate = freq_data.pstate_id;
-			gpstate_id = freq_data.pstate_id;
+			gpstates->highest_lpstate_idx = new_index;
+			gpstate_idx = new_index;
 		} else {
 		/* Elapsed_time is less than 5 seconds, continue to rampdown */
-			gpstate_id = calc_global_pstate(gpstates->elapsed_time,
-							gpstates->highest_lpstate,
-							freq_data.pstate_id);
+			gpstate_idx = calc_global_pstate(gpstates->elapsed_time,
+							 gpstates->highest_lpstate_idx,
+							 new_index);
 		}
 	} else {
 		reset_gpstates(policy);
-		gpstates->highest_lpstate = freq_data.pstate_id;
-		gpstate_id = freq_data.pstate_id;
+		gpstates->highest_lpstate_idx = new_index;
+		gpstate_idx = new_index;
 	}

 	/*
 	 * If local pstate is equal to global pstate, rampdown is over
 	 * So the timer is not required to be queued.
 	 */
-	if (gpstate_id != freq_data.pstate_id)
+	if (gpstate_idx != new_index)
 		queue_gpstate_timer(gpstates);
 	else
 		del_timer_sync(&gpstates->timer);

gpstates_done:
-	freq_data.gpstate_id = gpstate_id;
+	freq_data.gpstate_id = idx_to_pstate(gpstate_idx);
 	gpstates->last_sampled_time = cur_msec;
-	gpstates->last_gpstate = freq_data.gpstate_id;
-	gpstates->last_lpstate = freq_data.pstate_id;
+	gpstates->last_gpstate_idx = gpstate_idx;
+	gpstates->last_lpstate_idx = new_index;

 	spin_unlock(&gpstates->gpstate_lock);

@@@@ -725,7 -699,7 -745,7 +744,7 @@@@ static int powernv_cpufreq_cpu_init(str
 	policy->driver_data = gpstates;

 	/* initialize timer */
--	init_timer_deferrable(&gpstates->timer);
++	init_timer_pinned_deferrable(&gpstates->timer);
 	gpstates->timer.data = (unsigned long)policy;
 	gpstates->timer.function = gpstate_timer_handler;
 	gpstates->timer.expires = jiffies +
@@@@ -786,7 -760,9 -806,7 +805,7 @@@@ void powernv_cpufreq_work_fn(struct wor
 		struct cpufreq_policy policy;

 		cpufreq_get_policy(&policy, cpu);
-		cpufreq_frequency_table_target(&policy, policy.freq_table,
-					       policy.cur,
-					       CPUFREQ_RELATION_C, &index);
+		index = cpufreq_table_find_index_c(&policy, policy.cur);
 		powernv_cpufreq_target_index(&policy, index);
 		cpumask_andnot(&mask, &mask, policy.cpus);
 	}
@@@@ -872,8 -848,8 -892,8 +891,8 @@@@ static void powernv_cpufreq_stop_cpu(st
 	struct powernv_smp_call_data freq_data;
 	struct global_pstate_info *gpstates = policy->driver_data;

-	freq_data.pstate_id = powernv_pstate_info.min;
-	freq_data.gpstate_id = powernv_pstate_info.min;
+	freq_data.pstate_id = idx_to_pstate(powernv_pstate_info.min);
+	freq_data.gpstate_id = idx_to_pstate(powernv_pstate_info.min);
 	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
 	del_timer_sync(&gpstates->timer);
 }
diff --combined kernel/power/hibernate.c
index a881c6a7ba74,61761aa7cc19,fca9254280ee..33c79b6105c5
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@@@ -52,7 -52,7 -52,6 +52,7 @@@@ enum
 #ifdef CONFIG_SUSPEND
 	HIBERNATION_SUSPEND,
 #endif
+	HIBERNATION_TEST_RESUME,
 	/* keep last */
 	__HIBERNATION_AFTER_LAST
 };
@@@@ -300,12 -300,12 -299,12 +300,12 @@@@ static int create_image(int platform_mo
 	save_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, true);
 	error = swsusp_arch_suspend();
+ +	/* Restore control flow magically appears here */
+ +	restore_processor_state();
 	trace_suspend_resume(TPS("machine_suspend"), PM_EVENT_HIBERNATE, false);
 	if (error)
 		printk(KERN_ERR "PM: Error %d creating hibernation image\n",
 			error);
- -	/* Restore control flow magically appears here */
- -	restore_processor_state();
 	if (!in_suspend)
 		events_check_enabled = false;
@@@@ -410,11 -410,11 -409,6 +410,11 @@@@ int hibernation_snapshot(int platform_m
 	goto Close;
 }

+int __weak hibernate_resume_nonboot_cpu_disable(void)
+{
+	return disable_nonboot_cpus();
+}
+
 /**
  * resume_target_kernel - Restore system state from a hibernation image.
  * @platform_mode: Whether or not to use the platform driver.
@@@@ -439,7 -439,7 -433,7 +439,7 @@@@ static int resume_target_kernel(bool pl
 	if (error)
 		goto Cleanup;

-	error = disable_nonboot_cpus();
+	error = hibernate_resume_nonboot_cpu_disable();
 	if (error)
 		goto Enable_cpus;

@@@@ -648,39 -648,39 -642,12 +648,39 @@@@ static void power_down(void
 		cpu_relax();
 }

+static int load_image_and_restore(void)
+{
+	int error;
+	unsigned int flags;
+
+	pr_debug("PM: Loading hibernation image.\n");
+
+	lock_device_hotplug();
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Unlock;
+
+	error = swsusp_read(&flags);
+	swsusp_close(FMODE_READ);
+	if (!error)
+		hibernation_restore(flags & SF_PLATFORM_MODE);
+
+	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
+	swsusp_free();
+	free_basic_memory_bitmaps();
+ Unlock:
+	unlock_device_hotplug();
+
+	return error;
+}
+
 /**
  * hibernate - Carry out system hibernation, including saving the image.
  */
 int hibernate(void)
 {
-	int error;
+	int error, nr_calls = 0;
+	bool snapshot_test = false;

 	if (!hibernation_available()) {
 		pr_debug("PM: Hibernation not available.\n");
@@@@ -695,11 -695,11 -662,9 +695,11 @@@@
 	}

 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Exit;
+	}

 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@@@ -732,12 -732,12 -697,8 +732,12 @@@@
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
-		if (!error)
-			power_down();
+		if (!error) {
+			if (hibernation_mode == HIBERNATION_TEST_RESUME)
+				snapshot_test = true;
+			else
+				power_down();
+		}
 		in_suspend = 0;
 		pm_restore_gfp_mask();
 	} else {
@@@@ -748,18 -748,18 -709,12 +748,18 @@@@
 	free_basic_memory_bitmaps();
  Thaw:
 	unlock_device_hotplug();
+	if (snapshot_test) {
+		pr_debug("PM: Checking hibernation image\n");
+		error = swsusp_check();
+		if (!error)
+			error = load_image_and_restore();
+	}
 	thaw_processes();

 	/* Don't bother checking whether freezer_test_done is true */
 	freezer_test_done = false;

  Exit:
-	pm_notifier_call_chain(PM_POST_HIBERNATION);
+	__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 Unlock:
@@@@ -785,7 -785,7 -740,8 +785,7 @@@@
  */
 static int software_resume(void)
 {
-	int error;
-	unsigned int flags;
+	int error, nr_calls = 0;

 	/*
 	 * If the user said "noresume".. bail out early.
@@@@ -871,20 -871,20 -827,35 +871,20 @@@@
 	}

 	pm_prepare_console();
-	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
-	if (error)
+	error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls);
+	if (error) {
+		nr_calls--;
 		goto Close_Finish;
+	}

 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
 	if (error)
 		goto Close_Finish;
-
-	pr_debug("PM: Loading hibernation image.\n");
-
-	lock_device_hotplug();
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto Thaw;
-
-	error = swsusp_read(&flags);
-	swsusp_close(FMODE_READ);
-	if (!error)
-		hibernation_restore(flags & SF_PLATFORM_MODE);
-
-	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
-	swsusp_free();
-	free_basic_memory_bitmaps();
- Thaw:
-	unlock_device_hotplug();
+	error = load_image_and_restore();
 	thaw_processes();
  Finish:
-	pm_notifier_call_chain(PM_POST_RESTORE);
+	__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
 	/* For success case, the suspend path will release the lock */
@@@@ -907,7 -907,7 -878,6 +907,7 @@@@ static const char * const hibernation_m
 #ifdef CONFIG_SUSPEND
 	[HIBERNATION_SUSPEND]	= "suspend",
 #endif
+	[HIBERNATION_TEST_RESUME] = "test_resume",
 };

 /*
@@@@ -954,7 -954,7 -924,6 +954,7 @@@@ static ssize_t disk_show(struct kobjec
 #ifdef CONFIG_SUSPEND
 		case HIBERNATION_SUSPEND:
 #endif
+		case HIBERNATION_TEST_RESUME:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@@@ -1001,7 -1001,7 -970,6 +1001,7 @@@@ static ssize_t disk_store(struct kobjec
 #ifdef CONFIG_SUSPEND
 		case HIBERNATION_SUSPEND:
 #endif
+		case HIBERNATION_TEST_RESUME:
 			hibernation_mode = mode;
 			break;
 		case HIBERNATION_PLATFORM:
@@@@ -1147,16 -1147,16 -1115,13 +1147,16 @@@@ static int __init resume_offset_setup(c

 static int __init hibernate_setup(char *str)
 {
-	if (!strncmp(str, "noresume", 8))
+	if (!strncmp(str, "noresume", 8)) {
 		noresume = 1;
-	else if (!strncmp(str, "nocompress", 10))
+	} else if (!strncmp(str, "nocompress", 10)) {
 		nocompress = 1;
-	else if (!strncmp(str, "no", 2)) {
+	} else if (!strncmp(str, "no", 2)) {
 		noresume = 1;
 		nohibernate = 1;
+	} else if (IS_ENABLED(CONFIG_DEBUG_RODATA)
+		   && !strncmp(str, "protect_image", 13)) {
+		enable_restore_image_protection();
 	}
 	return 1;
 }
@@@@ -1189,6 -1189,11 -1154,11 +1189,6 @@@@ static int __init nohibernate_setup(cha
 	return 1;
 }

--static int __init kaslr_nohibernate_setup(char *str)
--{
--	return nohibernate_setup(str);
--}
--
 static int __init page_poison_nohibernate_setup(char *str)
 {
 #ifdef CONFIG_PAGE_POISONING_ZERO
@@@@ -1212,4 -1217,5 -1182,5 +1212,4 @@@@ __setup("hibernate=", hibernate_setup)
 __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
 __setup("nohibernate", nohibernate_setup);
--__setup("kaslr", kaslr_nohibernate_setup);
 __setup("page_poison=", page_poison_nohibernate_setup);
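
The index arithmetic introduced by idx_to_pstate()/pstate_to_idx() is easy to sanity-check outside the kernel. Below is a minimal user-space model of the two helpers; the five-entry table and the IDX_* constants are invented for the example, and only the abs() conversion trick is taken from the patch. On POWER the pstate ids are typically negative and decrease as the frequency drops, which is the case the abs() has to handle.

#include <stdio.h>
#include <stdlib.h>

/* Made-up pstate table, highest frequency first (index 0 == max). */
static const int pstate_ids[] = { -1, -2, -3, -4, -5 };
#define NR_PSTATES	5
#define IDX_MAX		0	/* index of the highest pstate */

static int idx_to_pstate(unsigned int i)
{
	return pstate_ids[i];
}

static unsigned int pstate_to_idx(int pstate)
{
	/* abs() copes with both increasing and decreasing pstate numbering */
	return abs(pstate - idx_to_pstate(IDX_MAX));
}

int main(void)
{
	for (unsigned int i = 0; i < NR_PSTATES; i++)
		printf("idx %u -> pstate %d -> idx %u\n",
		       i, idx_to_pstate(i), pstate_to_idx(idx_to_pstate(i)));
	return 0;
}

Every index round-trips cleanly, and the same code still works if the table is flipped to positive, increasing ids, which is why the driver can use a single conversion for both numbering schemes.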
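The ramp-down curve used by calc_global_pstate() can be tabulated the same way. This sketch reuses the driver's quadratic, ramp_down_percent(time) = time * time >> 18, which reaches 100% at MAX_RAMP_DOWN_TIME (5120 ms); the explicit min_idx parameter and the example indices are inventions of the sketch, while in the driver the minimum comes from powernv_pstate_info.min.

#include <stdio.h>

#define MAX_RAMP_DOWN_TIME	5120	/* ms */
#define ramp_down_percent(time)	((time) * (time) >> 18)	/* 100% at 5120 ms */

static int calc_global_pstate(unsigned int elapsed_time,
			      int highest_lpstate_idx, int local_pstate_idx,
			      int min_idx)
{
	/* fraction of the total index distance that should be dropped by now */
	int index_diff = ((int)ramp_down_percent(elapsed_time) *
			  (min_idx - highest_lpstate_idx)) / 100;

	/* the global pstate may never be slower than the local one */
	if (highest_lpstate_idx + index_diff >= local_pstate_idx)
		return local_pstate_idx;
	return highest_lpstate_idx + index_diff;
}

int main(void)
{
	/* ramping down from index 2 toward a local/minimum index of 10 */
	for (unsigned int t = 0; t <= MAX_RAMP_DOWN_TIME; t += 1024)
		printf("t=%4u ms -> gpstate idx %d\n",
		       t, calc_global_pstate(t, 2, 10, 10));
	return 0;
}

The global index barely moves during the first two seconds, then accelerates (index 7 by roughly 4 s) and reaches the local index exactly at 5120 ms, which is the behaviour the comment in the driver promises.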
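On the hibernation side, the new test_resume mode needs no new tooling: it is driven through the existing /sys/power files. A hypothetical user-space trigger, assuming root, CONFIG_HIBERNATION=y and a configured resume device, could look like the sketch below; echoing the same two strings from a shell is equivalent.

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);	/* flush; the state write blocks until resume */
}

int main(void)
{
	/* select the mode added by this patch ... */
	if (write_str("/sys/power/disk", "test_resume"))
		return 1;
	/* ... then start hibernation as usual */
	return write_str("/sys/power/state", "disk") ? 1 : 0;
}

Because hibernate() sets snapshot_test instead of calling power_down(), the kernel writes the image and then immediately reads it back through swsusp_check() and load_image_and_restore(), exercising the whole resume path without ever powering off.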
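Finally, the nr_calls bookkeeping around __pm_notifier_call_chain() is worth spelling out: the PREPARE pass counts how many notifiers actually ran, the caller drops the one that failed (nr_calls--), and the POST pass replays only that many, so no notifier is asked to undo a PREPARE it never saw. A stand-alone model of the pattern, with made-up callbacks:

#include <stdio.h>

#define NR_NOTIFIERS 4

static int prepare(int i)
{
	return i == 2 ? -1 : 0;		/* pretend the third notifier fails */
}

/* max_calls == -1 means "no limit", mirroring the kernel helper */
static int call_chain(int is_prepare, int max_calls, int *nr_calls)
{
	for (int i = 0; i < NR_NOTIFIERS && i != max_calls; i++) {
		if (nr_calls)
			(*nr_calls)++;
		if (is_prepare && prepare(i)) {
			printf("PREPARE failed at notifier %d\n", i);
			return -1;
		}
		if (!is_prepare)
			printf("POST delivered to notifier %d\n", i);
	}
	return 0;
}

int main(void)
{
	int nr_calls = 0;

	if (call_chain(1, -1, &nr_calls))
		nr_calls--;		/* skip the notifier that failed */
	call_chain(0, nr_calls, NULL);	/* unwind only notifiers 0 and 1 */
	return 0;
}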