From aa09a45d12fe351cbed7cd6e707ba810b6c57b30 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sun, 3 Dec 2023 20:29:44 +0900 Subject: [PATCH 01/37] Feat: adding compare_and_swap module file --- assignment10/compare_and_swap_module.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 assignment10/compare_and_swap_module.c diff --git a/assignment10/compare_and_swap_module.c b/assignment10/compare_and_swap_module.c new file mode 100644 index 000000000..7232781f7 --- /dev/null +++ b/assignment10/compare_and_swap_module.c @@ -0,0 +1,16 @@ +// #--------- compare_and_swap_module.c ---------# +#include +#include +#include + +int __init compare_and_swap_module_init(void) { + printk("Entering Compare and Swap Module!"); return 0; +} + +void __exit compare_and_swap_module_cleanup(void) { + printk("Exiting Compare and Swap Module!\n"); +} + +module_init(compare_and_swap_init); +module_exit(compare_and_swap_cleanup); +MODULE_LICENSE("GPL"); From e73e066ba771fdd1c7c7032ad45dbfe4c94ed844 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sun, 3 Dec 2023 20:30:22 +0900 Subject: [PATCH 02/37] Feat: adding spinlock_module file --- assignment10/spinlock_module.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 assignment10/spinlock_module.c diff --git a/assignment10/spinlock_module.c b/assignment10/spinlock_module.c new file mode 100644 index 000000000..854995589 --- /dev/null +++ b/assignment10/spinlock_module.c @@ -0,0 +1,16 @@ +// #--------- spinlock_module.c ---------# +#include +#include +#include + +int __init spinlock_module_init(void) { + printk("simple module"); return 0; +} + +void __exit spinlock_module_cleanup(void) { + printk("Bye Module!\n"); +} + +module_init(spinlock_module_init); +module_exit(spinlock_module_cleanup); +MODULE_LICENSE("GPL"); From 4d288973344137d1f6c39a037941375ac8465399 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 14:58:11 
+0900 Subject: [PATCH 03/37] Feat: implementing counter using compare and swap sync function --- assignment10/compare_and_swap_module.c | 39 +++++++++++++++++++++++--- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/assignment10/compare_and_swap_module.c b/assignment10/compare_and_swap_module.c index 7232781f7..1077b9537 100644 --- a/assignment10/compare_and_swap_module.c +++ b/assignment10/compare_and_swap_module.c @@ -2,15 +2,46 @@ #include #include #include +#include +#include +#include + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; +int counter = 0; + + +static int compare_and_swap_function(void *data) +{ + int original; + while(!kthread_should_stop()) { + // critical section + original = __sync_val_compare_and_swap(&counter, counter, counter + 1); + // end of the critical section + printk(KERN_INFO "pid[%u] %s: counter: %d\n", current->pid, __func__, original); + msleep(500); + } + do_exit(0); +} + int __init compare_and_swap_module_init(void) { - printk("Entering Compare and Swap Module!"); return 0; + printk("Entering Compare and Swap Module!"); + int i; + for (i = 0; i < NUM_THREADS; i++) { + threads[i] = kthread_run(compare_and_swap_function, NULL, "compare_and_swap_function"); + } + return 0; } void __exit compare_and_swap_module_cleanup(void) { - printk("Exiting Compare and Swap Module!\n"); + int i; + for(i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + } + printk("Exiting Compare and Swap Module!\n"); } -module_init(compare_and_swap_init); -module_exit(compare_and_swap_cleanup); +module_init(compare_and_swap_module_init); +module_exit(compare_and_swap_module_cleanup); MODULE_LICENSE("GPL"); From 342820d7b5ae4062da43417ef0ceec72396ceb86 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 15:00:28 +0900 Subject: [PATCH 04/37] Feat: adding makefile --- assignment10/Makefile | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 assignment10/Makefile diff --git 
a/assignment10/Makefile b/assignment10/Makefile new file mode 100644 index 000000000..3e26f43d5 --- /dev/null +++ b/assignment10/Makefile @@ -0,0 +1,8 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean From e2c77bc00f8497d16282a0bb019079d832f272cd Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 15:40:25 +0900 Subject: [PATCH 05/37] Feat: implementing counter using fetch and add syn function --- assignment10/fetch_and_add_module.c | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 assignment10/fetch_and_add_module.c diff --git a/assignment10/fetch_and_add_module.c b/assignment10/fetch_and_add_module.c new file mode 100644 index 000000000..ad8ff5f23 --- /dev/null +++ b/assignment10/fetch_and_add_module.c @@ -0,0 +1,48 @@ +// #--------- fetch_and_add_module.c ---------# +#include +#include +#include +#include +#include +#include + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; + +static int counter = 0; +static int fetch_and_add_function(void *data) +{ + int original; + while(!kthread_should_stop()) { + // critical section + original = __sync_fetch_and_add(&counter, 1); + // end of the critical section + printk(KERN_INFO "pid[%u] %s: counter: %d\n", current->pid, __func__, original); + msleep(500); + } + do_exit(0); +} + + +int __init fetch_and_add_module_init(void) { + printk("Entering Fetch and Add Module!"); + int i; + for (i = 0; i < NUM_THREADS; i++) { + threads[i] = kthread_run(fetch_and_add_function, NULL, "fetch_and_add_function"); + } + return 0; +} + +void __exit fetch_and_add_module_cleanup(void) { + int i; + for (i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + } + printk("Exiting Fetch and Add Module!\n"); + +} + 
+module_init(fetch_and_add_module_init); +module_exit(fetch_and_add_module_cleanup); +MODULE_LICENSE("GPL"); + From 51143400cf16e7155166f3ec19ee914b83677896 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 15:41:24 +0900 Subject: [PATCH 06/37] Feat: implementing counter using test and set sync fuction --- assignment10/test_and_set_module.c | 48 ++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 assignment10/test_and_set_module.c diff --git a/assignment10/test_and_set_module.c b/assignment10/test_and_set_module.c new file mode 100644 index 000000000..3f603975d --- /dev/null +++ b/assignment10/test_and_set_module.c @@ -0,0 +1,48 @@ +// #--------- compare_and_swap_module.c ---------# +#include +#include +#include +#include +#include +#include + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; + +int counter = 0; + +static int test_and_set_function(void *data) +{ + int original; + while(!kthread_should_stop()) { + // critical section + original = __sync_val_compare_and_swap(&counter, counter, counter + 1); + // end of the critical section + printk(KERN_INFO "pid[%u] %s: counter: %d\n", current->pid, __func__, original); + msleep(500); + } + do_exit(0); +} + + +int __init test_and_set_module_init(void) { + printk("Entering Test and Set Modue!\n"); + int i; + for (i = 0; i < NUM_THREADS; i++) { + threads[i] = kthread_run(test_and_set_function, NULL, "test_and_set_function"); + } + return 0; +} + +void __exit test_and_set_module_cleanup(void) { + int i; + for (i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + } + printk("Exiting Test and Set Module!\n"); +} + +module_init(test_and_set_module_init); +module_exit(test_and_set_module_cleanup); +MODULE_LICENSE("GPL"); + From 6a756f610644d82e159bcc4f25a959b46b95a236 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 19:44:14 +0900 Subject: [PATCH 07/37] Feat: spinlock module --- 
assignment10/spinlock_module.c | 137 ++++++++++++++++++++++++++++++++- 1 file changed, 135 insertions(+), 2 deletions(-) diff --git a/assignment10/spinlock_module.c b/assignment10/spinlock_module.c index 854995589..921ef2a63 100644 --- a/assignment10/spinlock_module.c +++ b/assignment10/spinlock_module.c @@ -2,13 +2,146 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include "calclock.h" + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; + +spinlock_t lock; + +struct node { + struct list_head list; + unsigned int data; +}; + +struct list_head list; + +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; + + +void *add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct node *first = NULL; + + int i; + for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); + new->data = i; + + spin_lock(&lock); + getrawmonotonic(&localclock[0]); + + list_add(&new->list, &list); + + getrawmonotonic(&localclock[1]); + + calclock(localclock, &time_insert, &count_insert); + spin_unlock(&lock); + if (first == NULL) first = new; + } + + printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct timespec localclock[2]; + struct node *cur = (struct node *) data, *tmp; + spin_lock(&lock); + + list_for_each_entry_safe(cur, tmp, &list, list) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + }; + spin_unlock(&lock); + return 0; +} + +int delete_from_list(int thread_id, int range_bound[2]) { + struct node *cur, *tmp; + struct timespec localclock[2]; + list_for_each_entry_safe(cur, tmp, &list, list) { + spin_lock(&lock); + getrawmonotonic(&localclock[0]); + 
list_del(&cur->list); + kfree(cur); + + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); + spin_unlock(&lock); + + }; + return 0; +} + +void set_iter_range(int thread_id, int range_bound[2]) { + range_bound[0] = thread_id * 250000; + range_bound[1] = range_bound[0] + 249999; +} + +static int work_fn(void *data) +{ + int range_bound[2]; + int thread_id = *(int*) data; + set_iter_range(thread_id, range_bound); + + void *ret = add_to_list(thread_id, range_bound); + search_list(thread_id, ret, range_bound); + delete_from_list(thread_id, range_bound); + + while(!kthread_should_stop()) { + msleep(500); + } + + printk(KERN_INFO "thread #%d stopped!\n", thread_id); + return 0; +} int __init spinlock_module_init(void) { - printk("simple module"); return 0; + printk("Entering Spinlock Module!\n"); + INIT_LIST_HEAD(&list); + spin_lock_init(&lock); + + int i; + //int range_start = 0; + for (i = 0; i < NUM_THREADS; i++) { + //int range_bound[2]; + //range_bound[0] = range_start; + //range_bound[1] = range_start + 250000 - 1; + int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); + *arg = i; + threads[i] = kthread_run(work_fn, (void*)arg, "compare_and_swap_function"); + //range_start += 250000 - 1; + } + + //for (i = 0; i < NUM_THREADS; i++) { + // threads[i] = kthread_run(search_list_function, NULL, "compare_and_swap_function"); + //} + + //for (i = 0; i < NUM_THREADS; i++) { + // threads[i] = kthread_run(delete_from_list_function, NULL, "compare_and_swap_function"); + //} + + return 0; } void __exit spinlock_module_cleanup(void) { - printk("Bye Module!\n"); + printk(KERN_INFO"%s: Spinlock linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); + + int i; + for(i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + printk("thread #%d stopped!", i + 1); + } + printk(KERN_INFO"%s: Exiting Spinlock Module!\n", __func__); } module_init(spinlock_module_init); From e1fbc0f8ca861cc61946f0cde92600df98321701 Mon Sep 
17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 19:44:36 +0900 Subject: [PATCH 08/37] Feat: adding makefile --- assignment10/Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/assignment10/Makefile b/assignment10/Makefile index 3e26f43d5..0949e994b 100644 --- a/assignment10/Makefile +++ b/assignment10/Makefile @@ -1,4 +1,6 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock.o +spinlock-y := spinlock_module.o calclock.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From dd999cf474cc8823e78b01ad42ad485e5a58922e Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 19:44:53 +0900 Subject: [PATCH 09/37] Feat: adding calclock.h --- assignment10/calclock.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 assignment10/calclock.h diff --git a/assignment10/calclock.h b/assignment10/calclock.h new file mode 100644 index 000000000..bb0f4bab8 --- /dev/null +++ b/assignment10/calclock.h @@ -0,0 +1,12 @@ +#ifndef __CALCLOCK_H +#define __CALCLOCK_H + +#include + +#define BILLION 1000000000UL + +unsigned long long calclock(struct timespec *myclock, + unsigned long long *total_time, unsigned long long *total_clock); + +#endif + From ced12f96415b4c4444a963efdc23294d3eefb394 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 19:45:07 +0900 Subject: [PATCH 10/37] Feat: adding calclock.c --- assignment10/calclock.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 assignment10/calclock.c diff --git a/assignment10/calclock.c b/assignment10/calclock.c new file mode 100644 index 000000000..da1c794b6 --- /dev/null +++ b/assignment10/calclock.c @@ -0,0 +1,20 @@ +#include "calclock.h" +unsigned long long calclock(struct timespec *myclock, +unsigned long long *total_time, 
unsigned long long *total_count) + +{ + unsigned long long timedelay = 0, temp = 0, temp_n = 0; + if (myclock[1].tv_nsec >= myclock[0].tv_nsec) { + temp = myclock[1].tv_sec - myclock[0].tv_sec; + temp_n = myclock[1].tv_nsec - myclock[0].tv_nsec; + timedelay = BILLION * temp + temp_n; + } else { + temp = myclock[1].tv_sec - myclock[0].tv_sec - 1; + temp_n = BILLION + myclock[1].tv_nsec - myclock[0].tv_nsec; + timedelay = BILLION * temp + temp_n; + } + __sync_fetch_and_add(total_time, timedelay); + __sync_fetch_and_add(total_count, 1); + return timedelay; +} + From 6c5422a3d19df71a5d957408d715bfa2317a5c89 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 21:16:46 +0900 Subject: [PATCH 11/37] Feat: including all modules --- assignment10/Makefile | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/assignment10/Makefile b/assignment10/Makefile index 0949e994b..c83215132 100644 --- a/assignment10/Makefile +++ b/assignment10/Makefile @@ -1,6 +1,8 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock.o -spinlock-y := spinlock_module.o calclock.o +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o +mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From 2919d80e869ae9f0e7e5d3ad651f03d6d0e88da5 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Wed, 6 Dec 2023 21:22:19 +0900 Subject: [PATCH 12/37] Feat: adding all module files --- assignment10/mutex_module.c | 142 ++++++++++++++++++++++++++++ assignment10/rw_semaphore_module.c | 145 +++++++++++++++++++++++++++++ assignment10/spinlock_module.c | 60 +++++------- 3 files changed, 311 insertions(+), 36 deletions(-) 
create mode 100644 assignment10/mutex_module.c create mode 100644 assignment10/rw_semaphore_module.c diff --git a/assignment10/mutex_module.c b/assignment10/mutex_module.c new file mode 100644 index 000000000..d791ea938 --- /dev/null +++ b/assignment10/mutex_module.c @@ -0,0 +1,142 @@ +// #--------- mutex_module.c ---------# +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "calclock.h" + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; + +struct mutex lock; + +struct node { + struct list_head list; + unsigned int data; +}; + +struct list_head list; + +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; + +void *add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct node *first = NULL; + + int i; + mutex_lock(&lock); + for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); + new->data = i; + + //mutex_lock(&lock); + getrawmonotonic(&localclock[0]); + + list_add(&new->list, &list); + + getrawmonotonic(&localclock[1]); + + calclock(localclock, &time_insert, &count_insert); + //mutex_unlock(&lock); + if (first == NULL) first = new; + } + mutex_lock(&lock); + printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct timespec localclock[2]; + struct node *cur = (struct node *) data, *tmp; + mutex_lock(&lock); + + list_for_each_entry_safe(cur, tmp, &list, list) { + if(range_bound[0] <= cur->data && cur->data <= range_bound[1]) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + } + }; + mutex_unlock(&lock); + return 0; +} + +int delete_from_list(int thread_id, int range_bound[2]) { + struct node *cur, *tmp; + struct timespec 
localclock[2]; + mutex_lock(&lock); + list_for_each_entry_safe(cur, tmp, &list, list) { + //mutex_lock(&lock); + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); + //mutex_unlock(&lock); + }; + mutex_unlock(&lock); + return 0; +} + +void set_iter_range(int thread_id, int range_bound[2]) { + range_bound[0] = thread_id * 250000; + range_bound[1] = range_bound[0] + 249999; +} + +static int work_fn(void *data) +{ + int range_bound[2]; + int thread_id = *(int*) data; + set_iter_range(thread_id, range_bound); + + void *ret = add_to_list(thread_id, range_bound); + search_list(thread_id, ret, range_bound); + delete_from_list(thread_id, range_bound); + + while(!kthread_should_stop()) { + msleep(500); + } + + printk(KERN_INFO "thread #%d stopped!\n", thread_id); + return 0; +} + +int __init mutex_module_init(void) { + printk("Entering Mutex Module!\n"); + INIT_LIST_HEAD(&list); + mutex_init(&lock); + + int i; + for (i = 0; i < NUM_THREADS; i++) { + int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); + *arg = i; + threads[i] = kthread_run(work_fn, (void*)arg, "thread%d", i); + } + return 0; +} + +void __exit mutex_module_cleanup(void) { + printk(KERN_INFO"%s: Mutex linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); + printk(KERN_INFO"%s: Mutex linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); + printk(KERN_INFO"%s: Mutex linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); + + int i; + for(i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + printk("thread #%d stopped!", i + 1); + } + printk(KERN_INFO"%s: Exiting Mutex Module!\n", __func__); +} + +module_init(mutex_module_init); +module_exit(mutex_module_cleanup); +MODULE_LICENSE("GPL"); + diff --git a/assignment10/rw_semaphore_module.c b/assignment10/rw_semaphore_module.c new file mode 100644 index 
000000000..206ad3e48 --- /dev/null +++ b/assignment10/rw_semaphore_module.c @@ -0,0 +1,145 @@ +// #--------- rw_semaphore_module.c ---------# +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "calclock.h" + +#define NUM_THREADS 4 +static struct task_struct *threads[NUM_THREADS]; + +struct rw_semaphore lock; + +struct node { + struct list_head list; + unsigned int data; +}; + +struct list_head list; + +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; + + +void *add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct node *first = NULL; + + int i; + down_write(&lock); + for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); + new->data = i; + + getrawmonotonic(&localclock[0]); + + list_add(&new->list, &list); + + getrawmonotonic(&localclock[1]); + + calclock(localclock, &time_insert, &count_insert); + if (first == NULL) first = new; + } + up_write(&lock); + + printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct timespec localclock[2]; + struct node *cur = (struct node *) data, *tmp; + down_read(&lock); + + list_for_each_entry_safe(cur, tmp, &list, list) { + if (range_bound[0] <= cur->data && cur->data <= range_bound[1]) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + } + }; + up_read(&lock); + return 0; +} +int delete_from_list(int thread_id, int range_bound[2]) { + struct node *cur, *tmp; + struct timespec localclock[2]; + down_write(&lock); + list_for_each_entry_safe(cur, tmp, &list, list) { + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, 
&count_delete); + + + }; + up_write(&lock); + return 0; +} + +void set_iter_range(int thread_id, int range_bound[2]) { + range_bound[0] = thread_id * 250000; + range_bound[1] = range_bound[0] + 249999; +} +static int work_fn(void *data) +{ + int range_bound[2]; + int thread_id = *(int*) data; + set_iter_range(thread_id, range_bound); + + void *ret = add_to_list(thread_id, range_bound); + search_list(thread_id, ret, range_bound); + delete_from_list(thread_id, range_bound); + + while(!kthread_should_stop()) { + msleep(500); + } + + printk(KERN_INFO "thread #%d stopped!\n", thread_id); + return 0; +} + +int __init rw_semaphore_module_init(void) { + printk("Entering RW Semaphore Module!\n"); + INIT_LIST_HEAD(&list); + init_rwsem(&lock); + + int i; + for (i = 0; i < NUM_THREADS; i++) { + int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); + *arg = i; + threads[i] = kthread_run(work_fn, (void*)arg, "thread%d", i); + } + return 0; +} + +void __exit rw_semaphore_module_cleanup(void) { + printk(KERN_INFO"%s: RW Semaphore linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); + printk(KERN_INFO"%s: RW Semaphore linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); + printk(KERN_INFO"%s: RW Semaphore linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); + + int i; + for(i = 0; i < NUM_THREADS; i++) { + kthread_stop(threads[i]); + printk("thread #%d stopped!", i + 1); + } + printk(KERN_INFO"%s: Exiting RW semaphore Module!\n", __func__); +} + +module_init(rw_semaphore_module_init); +module_exit(rw_semaphore_module_cleanup); +MODULE_LICENSE("GPL"); + + + + + + + diff --git a/assignment10/spinlock_module.c b/assignment10/spinlock_module.c index 921ef2a63..1d49add70 100644 --- a/assignment10/spinlock_module.c +++ b/assignment10/spinlock_module.c @@ -13,14 +13,14 @@ #define NUM_THREADS 4 static struct task_struct *threads[NUM_THREADS]; -spinlock_t lock; +spinlock_t my_lock; -struct node { 
+struct my_node { struct list_head list; unsigned int data; }; -struct list_head list; +struct list_head my_list; unsigned long long count_insert, count_search, count_delete; unsigned long long time_insert, time_search, time_delete; @@ -28,55 +28,55 @@ unsigned long long time_insert, time_search, time_delete; void *add_to_list(int thread_id, int range_bound[]) { struct timespec localclock[2]; - struct node *first = NULL; + struct my_node *first = NULL; int i; for (i = range_bound[0]; i < range_bound[1] + 1; i++) { - struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); + struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); new->data = i; - spin_lock(&lock); + spin_lock(&my_lock); getrawmonotonic(&localclock[0]); - list_add(&new->list, &list); + list_add(&new->list, &my_list); getrawmonotonic(&localclock[1]); calclock(localclock, &time_insert, &count_insert); - spin_unlock(&lock); + spin_unlock(&my_lock); if (first == NULL) first = new; } - printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + printk(KERN_INFO "thread #%d range: %d ~ %d\n", thread_id, range_bound[0], range_bound[1]); return first; } int search_list(int thread_id, void *data, int range_bound[]) { struct timespec localclock[2]; - struct node *cur = (struct node *) data, *tmp; - spin_lock(&lock); + struct my_node *cur = (struct my_node *) data, *tmp; + spin_lock(&my_lock); - list_for_each_entry_safe(cur, tmp, &list, list) { + list_for_each_entry_safe(cur, tmp, &my_list, list) { getrawmonotonic(&localclock[0]); getrawmonotonic(&localclock[1]); calclock(localclock, &time_search,&count_search); }; - spin_unlock(&lock); + spin_unlock(&my_lock); return 0; } int delete_from_list(int thread_id, int range_bound[2]) { - struct node *cur, *tmp; + struct my_node *cur, *tmp; struct timespec localclock[2]; - list_for_each_entry_safe(cur, tmp, &list, list) { - spin_lock(&lock); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + spin_lock(&my_lock); 
getrawmonotonic(&localclock[0]); list_del(&cur->list); kfree(cur); getrawmonotonic(&localclock[1]); calclock(localclock, &time_delete, &count_delete); - spin_unlock(&lock); + spin_unlock(&my_lock); }; return 0; @@ -107,41 +107,29 @@ static int work_fn(void *data) int __init spinlock_module_init(void) { printk("Entering Spinlock Module!\n"); - INIT_LIST_HEAD(&list); - spin_lock_init(&lock); + INIT_LIST_HEAD(&my_list); + spin_lock_init(&my_lock); int i; - //int range_start = 0; for (i = 0; i < NUM_THREADS; i++) { - //int range_bound[2]; - //range_bound[0] = range_start; - //range_bound[1] = range_start + 250000 - 1; int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); *arg = i; - threads[i] = kthread_run(work_fn, (void*)arg, "compare_and_swap_function"); - //range_start += 250000 - 1; + threads[i] = kthread_run(work_fn, (void*)arg, "thread%d", i); } - - //for (i = 0; i < NUM_THREADS; i++) { - // threads[i] = kthread_run(search_list_function, NULL, "compare_and_swap_function"); - //} - - //for (i = 0; i < NUM_THREADS; i++) { - // threads[i] = kthread_run(delete_from_list_function, NULL, "compare_and_swap_function"); - //} - return 0; } void __exit spinlock_module_cleanup(void) { printk(KERN_INFO"%s: Spinlock linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); - + printk(KERN_INFO"%s: Spinlock linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); + printk(KERN_INFO"%s: Spinlock linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); + int i; for(i = 0; i < NUM_THREADS; i++) { kthread_stop(threads[i]); printk("thread #%d stopped!", i + 1); } - printk(KERN_INFO"%s: Exiting Spinlock Module!\n", __func__); + printk(KERN_INFO"%s: Exiting Spinlock Module!\n", __func__); } module_init(spinlock_module_init); From 02cb32200a4363d7741f6ddfc46056bfe8b8a969 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 17:27:33 +0900 Subject: [PATCH 13/37] Feat: 
separating files into sub directories --- assignment10/compare_and_swap_module/Makefile | 12 ++++++++++++ .../compare_and_swap_module.c | 0 assignment10/fetch_and_add_module/Makefile | 12 ++++++++++++ .../fetch_and_add_module.c | 0 assignment10/mutex_module/Makefile | 12 ++++++++++++ assignment10/{ => mutex_module}/mutex_module.c | 0 assignment10/rw_semaphore_module/Makefile | 12 ++++++++++++ .../{ => rw_semaphore_module}/rw_semaphore_module.c | 0 assignment10/spinlock_module/Makefile | 11 +++++++++++ assignment10/{ => spinlock_module}/spinlock_module.c | 6 +++--- assignment10/test_and_set_module/Makefile | 12 ++++++++++++ .../{ => test_and_set_module}/test_and_set_module.c | 0 12 files changed, 74 insertions(+), 3 deletions(-) create mode 100644 assignment10/compare_and_swap_module/Makefile rename assignment10/{ => compare_and_swap_module}/compare_and_swap_module.c (100%) create mode 100644 assignment10/fetch_and_add_module/Makefile rename assignment10/{ => fetch_and_add_module}/fetch_and_add_module.c (100%) create mode 100644 assignment10/mutex_module/Makefile rename assignment10/{ => mutex_module}/mutex_module.c (100%) create mode 100644 assignment10/rw_semaphore_module/Makefile rename assignment10/{ => rw_semaphore_module}/rw_semaphore_module.c (100%) create mode 100644 assignment10/spinlock_module/Makefile rename assignment10/{ => spinlock_module}/spinlock_module.c (96%) create mode 100644 assignment10/test_and_set_module/Makefile rename assignment10/{ => test_and_set_module}/test_and_set_module.c (100%) diff --git a/assignment10/compare_and_swap_module/Makefile b/assignment10/compare_and_swap_module/Makefile new file mode 100644 index 000000000..c83215132 --- /dev/null +++ b/assignment10/compare_and_swap_module/Makefile @@ -0,0 +1,12 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o 
+mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/compare_and_swap_module.c b/assignment10/compare_and_swap_module/compare_and_swap_module.c similarity index 100% rename from assignment10/compare_and_swap_module.c rename to assignment10/compare_and_swap_module/compare_and_swap_module.c diff --git a/assignment10/fetch_and_add_module/Makefile b/assignment10/fetch_and_add_module/Makefile new file mode 100644 index 000000000..c83215132 --- /dev/null +++ b/assignment10/fetch_and_add_module/Makefile @@ -0,0 +1,12 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o +mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/fetch_and_add_module.c b/assignment10/fetch_and_add_module/fetch_and_add_module.c similarity index 100% rename from assignment10/fetch_and_add_module.c rename to assignment10/fetch_and_add_module/fetch_and_add_module.c diff --git a/assignment10/mutex_module/Makefile b/assignment10/mutex_module/Makefile new file mode 100644 index 000000000..c83215132 --- /dev/null +++ b/assignment10/mutex_module/Makefile @@ -0,0 +1,12 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o +mutex_module_final-y := 
mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/mutex_module.c b/assignment10/mutex_module/mutex_module.c similarity index 100% rename from assignment10/mutex_module.c rename to assignment10/mutex_module/mutex_module.c diff --git a/assignment10/rw_semaphore_module/Makefile b/assignment10/rw_semaphore_module/Makefile new file mode 100644 index 000000000..c83215132 --- /dev/null +++ b/assignment10/rw_semaphore_module/Makefile @@ -0,0 +1,12 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o +mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/rw_semaphore_module.c b/assignment10/rw_semaphore_module/rw_semaphore_module.c similarity index 100% rename from assignment10/rw_semaphore_module.c rename to assignment10/rw_semaphore_module/rw_semaphore_module.c diff --git a/assignment10/spinlock_module/Makefile b/assignment10/spinlock_module/Makefile new file mode 100644 index 000000000..f88bd41ae --- /dev/null +++ b/assignment10/spinlock_module/Makefile @@ -0,0 +1,11 @@ +obj-m := spinlock_module_final.o +spinlock_module_final-y := spinlock_module.o ../calclock.o +mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules 
+clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/spinlock_module.c b/assignment10/spinlock_module/spinlock_module.c similarity index 96% rename from assignment10/spinlock_module.c rename to assignment10/spinlock_module/spinlock_module.c index 1d49add70..e097283ca 100644 --- a/assignment10/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -8,7 +8,7 @@ #include #include #include -#include "calclock.h" +#include "../calclock.h" #define NUM_THREADS 4 static struct task_struct *threads[NUM_THREADS]; @@ -31,7 +31,7 @@ void *add_to_list(int thread_id, int range_bound[]) { struct my_node *first = NULL; int i; - for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + for (i = range_bound[0]; i <= range_bound[1]; i++) { struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); new->data = i; @@ -65,7 +65,7 @@ int search_list(int thread_id, void *data, int range_bound[]) { return 0; } -int delete_from_list(int thread_id, int range_bound[2]) { +int delete_from_list(int thread_id, int range_bound[]) { struct my_node *cur, *tmp; struct timespec localclock[2]; list_for_each_entry_safe(cur, tmp, &my_list, list) { diff --git a/assignment10/test_and_set_module/Makefile b/assignment10/test_and_set_module/Makefile new file mode 100644 index 000000000..c83215132 --- /dev/null +++ b/assignment10/test_and_set_module/Makefile @@ -0,0 +1,12 @@ +obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ + spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o +spinlock_module_final-y := spinlock_module.o calclock.o +mutex_module_final-y := mutex_module.o calclock.o +rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +KERNEL_DIR := /lib/modules/$(shell uname -r)/build +PWD := $(shell pwd) + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) modules +clean: + $(MAKE) -C $(KERNEL_DIR) M=$(PWD) clean diff --git a/assignment10/test_and_set_module.c 
b/assignment10/test_and_set_module/test_and_set_module.c similarity index 100% rename from assignment10/test_and_set_module.c rename to assignment10/test_and_set_module/test_and_set_module.c From a1227882591b49af9f7dee2a84092d834ab8a69d Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 17:45:20 +0900 Subject: [PATCH 14/37] Feat: copying filemap.c internal.h from kernel code and deleting redundant functions --- pxt4/mm/filemap.c | 298 ++++++++++++++++++++++ pxt4/mm/internal.h | 600 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 898 insertions(+) create mode 100644 pxt4/mm/filemap.c create mode 100644 pxt4/mm/internal.h diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c new file mode 100644 index 000000000..ad0a07746 --- /dev/null +++ b/pxt4/mm/filemap.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * linux/mm/filemap.c + * + * Copyright (C) 1994-1999 Linus Torvalds + */ + +/* + * This file handles the generic file mmap semantics used by + * most "normal" filesystems (but you don't /have/ to use this: + * the NFS filesystem used to do this differently, for example) + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +#define CREATE_TRACE_POINTS +#include + +/* + * FIXME: remove all knowledge of the buffer layer from the core VM + */ +#include /* for try_to_free_buffers */ + +#include + +/* + * Shared mappings implemented 30.11.1994. It's not fully working yet, + * though. + * + * Shared mappings now work. 15.8.1995 Bruno. 
+ * + * finished 'unifying' the page and buffer cache and SMP-threaded the + * page-cache, 21.05.1999, Ingo Molnar + * + * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli + */ + +/* + * Lock ordering: + * + * ->i_mmap_rwsem (truncate_pagecache) + * ->private_lock (__free_pte->__set_page_dirty_buffers) + * ->swap_lock (exclusive_swap_page, others) + * ->i_pages lock + * + * ->i_mutex + * ->i_mmap_rwsem (truncate->unmap_mapping_range) + * + * ->mmap_sem + * ->i_mmap_rwsem + * ->page_table_lock or pte_lock (various, mainly in memory.c) + * ->i_pages lock (arch-dependent flush_dcache_mmap_lock) + * + * ->mmap_sem + * ->lock_page (access_process_vm) + * + * ->i_mutex (generic_perform_write) + * ->mmap_sem (fault_in_pages_readable->do_page_fault) + * + * bdi->wb.list_lock + * sb_lock (fs/fs-writeback.c) + * ->i_pages lock (__sync_single_inode) + * + * ->i_mmap_rwsem + * ->anon_vma.lock (vma_adjust) + * + * ->anon_vma.lock + * ->page_table_lock or pte_lock (anon_vma_prepare and various) + * + * ->page_table_lock or pte_lock + * ->swap_lock (try_to_unmap_one) + * ->private_lock (try_to_unmap_one) + * ->i_pages lock (try_to_unmap_one) + * ->pgdat->lru_lock (follow_page->mark_page_accessed) + * ->pgdat->lru_lock (check_pte_range->isolate_lru_page) + * ->private_lock (page_remove_rmap->set_page_dirty) + * ->i_pages lock (page_remove_rmap->set_page_dirty) + * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) + * ->inode->i_lock (page_remove_rmap->set_page_dirty) + * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) + * bdi.wb->list_lock (zap_pte_range->set_page_dirty) + * ->inode->i_lock (zap_pte_range->set_page_dirty) + * ->private_lock (zap_pte_range->__set_page_dirty_buffers) + * + * ->i_mmap_rwsem + * ->tasklist_lock (memory_failure, collect_procs_ao) + */ + +ssize_t generic_perform_write(struct file *file, + struct iov_iter *i, loff_t pos) +{ + struct address_space *mapping = file->f_mapping; + const struct address_space_operations *a_ops = mapping->a_ops; + long 
status = 0; + ssize_t written = 0; + unsigned int flags = 0; + + do { + struct page *page; + unsigned long offset; /* Offset into pagecache page */ + unsigned long bytes; /* Bytes to write to page */ + size_t copied; /* Bytes copied from user */ + void *fsdata; + + offset = (pos & (PAGE_SIZE - 1)); + bytes = min_t(unsigned long, PAGE_SIZE - offset, + iov_iter_count(i)); + +again: + /* + * Bring in the user page that we will copy from _first_. + * Otherwise there's a nasty deadlock on copying from the + * same page as we're writing to, without it being marked + * up-to-date. + * + * Not only is this an optimisation, but it is also required + * to check that the address is actually valid, when atomic + * usercopies are used, below. + */ + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + status = -EFAULT; + break; + } + + if (fatal_signal_pending(current)) { + status = -EINTR; + break; + } + + status = a_ops->write_begin(file, mapping, pos, bytes, flags, + &page, &fsdata); + if (unlikely(status < 0)) + break; + + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); + flush_dcache_page(page); + + status = a_ops->write_end(file, mapping, pos, bytes, copied, + page, fsdata); + if (unlikely(status < 0)) + break; + copied = status; + + cond_resched(); + + iov_iter_advance(i, copied); + if (unlikely(copied == 0)) { + /* + * If we were unable to copy any data at all, we must + * fall back to a single segment length write. + * + * If we didn't fallback here, we could livelock + * because not all segments in the iov can be copied at + * once without a pagefault. + */ + bytes = min_t(unsigned long, PAGE_SIZE - offset, + iov_iter_single_seg_count(i)); + goto again; + } + pos += copied; + written += copied; + + balance_dirty_pages_ratelimited(mapping); + } while (iov_iter_count(i)); + + return written ? 
written : status; +} +EXPORT_SYMBOL(generic_perform_write); + +/** + * __generic_file_write_iter - write data to a file + * @iocb: IO state structure (file, offset, etc.) + * @from: iov_iter with data to write + * + * This function does all the work needed for actually writing data to a + * file. It does all basic checks, removes SUID from the file, updates + * modification times and calls proper subroutines depending on whether we + * do direct IO or a standard buffered write. + * + * It expects i_mutex to be grabbed unless we work on a block device or similar + * object which does not need locking at all. + * + * This function does *not* take care of syncing data in case of O_SYNC write. + * A caller has to handle it. This is mainly due to the fact that we want to + * avoid syncing under i_mutex. + * + * Return: + * * number of bytes written, even for truncated writes + * * negative error code if no data has been written at all + */ +ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct address_space * mapping = file->f_mapping; + struct inode *inode = mapping->host; + ssize_t written = 0; + ssize_t err; + ssize_t status; + + /* We can write back this queue in page reclaim */ + current->backing_dev_info = inode_to_bdi(inode); + err = file_remove_privs(file); + if (err) + goto out; + + err = file_update_time(file); + if (err) + goto out; + + if (iocb->ki_flags & IOCB_DIRECT) { + loff_t pos, endbyte; + + written = generic_file_direct_write(iocb, from); + /* + * If the write stopped short of completing, fall back to + * buffered writes. Some filesystems do this for writes to + * holes, for example. For DAX files, a buffered write will + * not succeed (even if it did, DAX does not handle dirty + * page-cache pages correctly). 
+ */ + if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) + goto out; + + status = generic_perform_write(file, from, pos = iocb->ki_pos); + /* + * If generic_perform_write() returned a synchronous error + * then we want to return the number of bytes which were + * direct-written, or the error code if that was zero. Note + * that this differs from normal direct-io semantics, which + * will return -EFOO even if some bytes were written. + */ + if (unlikely(status < 0)) { + err = status; + goto out; + } + /* + * We need to ensure that the page cache pages are written to + * disk and invalidated to preserve the expected O_DIRECT + * semantics. + */ + endbyte = pos + status - 1; + err = filemap_write_and_wait_range(mapping, pos, endbyte); + if (err == 0) { + iocb->ki_pos = endbyte + 1; + written += status; + invalidate_mapping_pages(mapping, + pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); + } else { + /* + * We don't know how much we wrote, so just return + * the number of bytes which were direct-written + */ + } + } else { + written = generic_perform_write(file, from, iocb->ki_pos); + if (likely(written > 0)) + iocb->ki_pos += written; + } +out: + current->backing_dev_info = NULL; + return written ? written : err; +} +EXPORT_SYMBOL(__generic_file_write_iter); + diff --git a/pxt4/mm/internal.h b/pxt4/mm/internal.h new file mode 100644 index 000000000..cf382549d --- /dev/null +++ b/pxt4/mm/internal.h @@ -0,0 +1,600 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* internal.h: mm/ internal definitions + * + * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ +#ifndef __MM_INTERNAL_H +#define __MM_INTERNAL_H + +#include +#include +#include +#include + +/* + * The set of flags that only affect watermark checking and reclaim + * behaviour. This is used by the MM to obey the caller constraints + * about IO, FS and watermark checking while ignoring placement + * hints such as HIGHMEM usage. 
+ */ +#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ + __GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\ + __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ + __GFP_ATOMIC) + +/* The GFP flags allowed during early boot */ +#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) + +/* Control allocation cpuset and node placement constraints */ +#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) + +/* Do not use these with a slab allocator */ +#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) + +void page_writeback_init(void); + +vm_fault_t do_swap_page(struct vm_fault *vmf); + +void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, + unsigned long floor, unsigned long ceiling); + +static inline bool can_madv_lru_vma(struct vm_area_struct *vma) +{ + return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)); +} + +void unmap_page_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end, + struct zap_details *details); + +extern unsigned int __do_page_cache_readahead(struct address_space *mapping, + struct file *filp, pgoff_t offset, unsigned long nr_to_read, + unsigned long lookahead_size); + +/* + * Submit IO for the read-ahead request in file_ra_state. + */ +static inline unsigned long ra_submit(struct file_ra_state *ra, + struct address_space *mapping, struct file *filp) +{ + return __do_page_cache_readahead(mapping, filp, + ra->start, ra->size, ra->async_size); +} + +/* + * Turn a non-refcounted page (->_refcount == 0) into refcounted with + * a count of one. + */ +static inline void set_page_refcounted(struct page *page) +{ + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(page_ref_count(page), page); + set_page_count(page, 1); +} + +extern unsigned long highest_memmap_pfn; + +/* + * Maximum number of reclaim retries without progress before the OOM + * killer is consider the only way forward. 
+ */ +#define MAX_RECLAIM_RETRIES 16 + +/* + * in mm/vmscan.c: + */ +extern int isolate_lru_page(struct page *page); +extern void putback_lru_page(struct page *page); + +/* + * in mm/rmap.c: + */ +extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address); + +/* + * in mm/page_alloc.c + */ + +/* + * Structure for holding the mostly immutable allocation parameters passed + * between functions involved in allocations, including the alloc_pages* + * family of functions. + * + * nodemask, migratetype and high_zoneidx are initialized only once in + * __alloc_pages_nodemask() and then never change. + * + * zonelist, preferred_zone and classzone_idx are set first in + * __alloc_pages_nodemask() for the fast path, and might be later changed + * in __alloc_pages_slowpath(). All other functions pass the whole strucure + * by a const pointer. + */ +struct alloc_context { + struct zonelist *zonelist; + nodemask_t *nodemask; + struct zoneref *preferred_zoneref; + int migratetype; + enum zone_type high_zoneidx; + bool spread_dirty_pages; +}; + +#define ac_classzone_idx(ac) zonelist_zone_idx(ac->preferred_zoneref) + +/* + * Locate the struct page for both the matching buddy in our + * pair (buddy1) and the combined O(n+1) page they form (page). 
+ * + * 1) Any buddy B1 will have an order O twin B2 which satisfies + * the following equation: + * B2 = B1 ^ (1 << O) + * For example, if the starting buddy (buddy2) is #8 its order + * 1 buddy is #10: + * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 + * + * 2) Any buddy B will have an order O+1 parent P which + * satisfies the following equation: + * P = B & ~(1 << O) + * + * Assumption: *_mem_map is contiguous at least up to MAX_ORDER + */ +static inline unsigned long +__find_buddy_pfn(unsigned long page_pfn, unsigned int order) +{ + return page_pfn ^ (1 << order); +} + +extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn, + unsigned long end_pfn, struct zone *zone); + +static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, + unsigned long end_pfn, struct zone *zone) +{ + if (zone->contiguous) + return pfn_to_page(start_pfn); + + return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); +} + +extern int __isolate_free_page(struct page *page, unsigned int order); +extern void memblock_free_pages(struct page *page, unsigned long pfn, + unsigned int order); +extern void __free_pages_core(struct page *page, unsigned int order); +extern void prep_compound_page(struct page *page, unsigned int order); +extern void post_alloc_hook(struct page *page, unsigned int order, + gfp_t gfp_flags); +extern int user_min_free_kbytes; + +#if defined CONFIG_COMPACTION || defined CONFIG_CMA + +/* + * in mm/compaction.c + */ +/* + * compact_control is used to track pages being migrated and the free pages + * they are being migrated to during memory compaction. The free_pfn starts + * at the end of a zone and migrate_pfn begins at the start. 
Movable pages + * are moved to the end of a zone during a compaction run and the run + * completes when free_pfn <= migrate_pfn + */ +struct compact_control { + struct list_head freepages; /* List of free pages to migrate to */ + struct list_head migratepages; /* List of pages being migrated */ + unsigned int nr_freepages; /* Number of isolated free pages */ + unsigned int nr_migratepages; /* Number of pages to migrate */ + unsigned long free_pfn; /* isolate_freepages search base */ + unsigned long migrate_pfn; /* isolate_migratepages search base */ + unsigned long fast_start_pfn; /* a pfn to start linear scan from */ + struct zone *zone; + unsigned long total_migrate_scanned; + unsigned long total_free_scanned; + unsigned short fast_search_fail;/* failures to use free list searches */ + short search_order; /* order to start a fast search at */ + const gfp_t gfp_mask; /* gfp mask of a direct compactor */ + int order; /* order a direct compactor needs */ + int migratetype; /* migratetype of direct compactor */ + const unsigned int alloc_flags; /* alloc flags of a direct compactor */ + const int classzone_idx; /* zone index of a direct compactor */ + enum migrate_mode mode; /* Async or sync migration mode */ + bool ignore_skip_hint; /* Scan blocks even if marked skip */ + bool no_set_skip_hint; /* Don't mark blocks for skipping */ + bool ignore_block_suitable; /* Scan blocks considered unsuitable */ + bool direct_compaction; /* False from kcompactd or /proc/... */ + bool whole_zone; /* Whole zone should/has been scanned */ + bool contended; /* Signal lock or sched contention */ + bool rescan; /* Rescanning the same pageblock */ +}; + +/* + * Used in direct compaction when a page should be taken from the freelists + * immediately when one is created during the free path. 
+ */ +struct capture_control { + struct compact_control *cc; + struct page *page; +}; + +unsigned long +isolate_freepages_range(struct compact_control *cc, + unsigned long start_pfn, unsigned long end_pfn); +unsigned long +isolate_migratepages_range(struct compact_control *cc, + unsigned long low_pfn, unsigned long end_pfn); +int find_suitable_fallback(struct free_area *area, unsigned int order, + int migratetype, bool only_stealable, bool *can_steal); + +#endif + +/* + * This function returns the order of a free page in the buddy system. In + * general, page_zone(page)->lock must be held by the caller to prevent the + * page from being allocated in parallel and returning garbage as the order. + * If a caller does not hold page_zone(page)->lock, it must guarantee that the + * page cannot be allocated or merged in parallel. Alternatively, it must + * handle invalid values gracefully, and use page_order_unsafe() below. + */ +static inline unsigned int page_order(struct page *page) +{ + /* PageBuddy() must be checked by the caller */ + return page_private(page); +} + +/* + * Like page_order(), but for callers who cannot afford to hold the zone lock. + * PageBuddy() should be checked first by the caller to minimize race window, + * and invalid values must be handled gracefully. + * + * READ_ONCE is used so that if the caller assigns the result into a local + * variable and e.g. tests it for valid range before using, the compiler cannot + * decide to remove the variable and inline the page_private(page) multiple + * times, potentially observing different values in the tests and the actual + * use of the result. + */ +#define page_order_unsafe(page) READ_ONCE(page_private(page)) + +static inline bool is_cow_mapping(vm_flags_t flags) +{ + return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; +} + +/* + * These three helpers classifies VMAs for virtual memory accounting. 
+ */ + +/* + * Executable code area - executable, not writable, not stack + */ +static inline bool is_exec_mapping(vm_flags_t flags) +{ + return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; +} + +/* + * Stack area - atomatically grows in one direction + * + * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: + * do_mmap() forbids all other combinations. + */ +static inline bool is_stack_mapping(vm_flags_t flags) +{ + return (flags & VM_STACK) == VM_STACK; +} + +/* + * Data area - private, writable, not stack + */ +static inline bool is_data_mapping(vm_flags_t flags) +{ + return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; +} + +/* mm/util.c */ +void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, + struct vm_area_struct *prev, struct rb_node *rb_parent); + +#ifdef CONFIG_MMU +extern long populate_vma_page_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, int *nonblocking); +extern void munlock_vma_pages_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end); +static inline void munlock_vma_pages_all(struct vm_area_struct *vma) +{ + munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); +} + +/* + * must be called with vma's mmap_sem held for read or write, and page locked. + */ +extern void mlock_vma_page(struct page *page); +extern unsigned int munlock_vma_page(struct page *page); + +/* + * Clear the page's PageMlocked(). This can be useful in a situation where + * we want to unconditionally remove a page from the pagecache -- e.g., + * on truncation or freeing. + * + * It is legal to call this function for any page, mlocked or not. + * If called for a page that is still mapped by mlocked vmas, all we do + * is revert to lazy LRU behaviour -- semantics are not broken. 
+ */ +extern void clear_page_mlock(struct page *page); + +/* + * mlock_migrate_page - called only from migrate_misplaced_transhuge_page() + * (because that does not go through the full procedure of migration ptes): + * to migrate the Mlocked page flag; update statistics. + */ +static inline void mlock_migrate_page(struct page *newpage, struct page *page) +{ + if (TestClearPageMlocked(page)) { + int nr_pages = hpage_nr_pages(page); + + /* Holding pmd lock, no change in irq context: __mod is safe */ + __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); + SetPageMlocked(newpage); + __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages); + } +} + +extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma); + +/* + * At what user virtual address is page expected in vma? + * Returns -EFAULT if all of the page is outside the range of vma. + * If page is a compound head, the entire compound page is considered. + */ +static inline unsigned long +vma_address(struct page *page, struct vm_area_struct *vma) +{ + pgoff_t pgoff; + unsigned long address; + + VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ + pgoff = page_to_pgoff(page); + if (pgoff >= vma->vm_pgoff) { + address = vma->vm_start + + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + /* Check for address beyond vma (or wrapped through 0?) */ + if (address < vma->vm_start || address >= vma->vm_end) + address = -EFAULT; + } else if (PageHead(page) && + pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) { + /* Test above avoids possibility of wrap to 0 on 32-bit */ + address = vma->vm_start; + } else { + address = -EFAULT; + } + return address; +} + +/* + * Then at what user virtual address will none of the page be found in vma? + * Assumes that vma_address() already returned a good starting address. + * If page is a compound head, the entire compound page is considered. 
+ */ +static inline unsigned long +vma_address_end(struct page *page, struct vm_area_struct *vma) +{ + pgoff_t pgoff; + unsigned long address; + + VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ + pgoff = page_to_pgoff(page) + compound_nr(page); + address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + /* Check for address beyond vma (or wrapped through 0?) */ + if (address < vma->vm_start || address > vma->vm_end) + address = vma->vm_end; + return address; +} + +static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, + struct file *fpin) +{ + int flags = vmf->flags; + + if (fpin) + return fpin; + + /* + * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or + * anything, so we only pin the file and drop the mmap_sem if only + * FAULT_FLAG_ALLOW_RETRY is set. + */ + if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) == + FAULT_FLAG_ALLOW_RETRY) { + fpin = get_file(vmf->vma->vm_file); + up_read(&vmf->vma->vm_mm->mmap_sem); + } + return fpin; +} + +#else /* !CONFIG_MMU */ +static inline void clear_page_mlock(struct page *page) { } +static inline void mlock_vma_page(struct page *page) { } +static inline void mlock_migrate_page(struct page *new, struct page *old) { } + +#endif /* !CONFIG_MMU */ + +/* + * Return the mem_map entry representing the 'offset' subpage within + * the maximally aligned gigantic page 'base'. Handle any discontiguity + * in the mem_map at MAX_ORDER_NR_PAGES boundaries. + */ +static inline struct page *mem_map_offset(struct page *base, int offset) +{ + if (unlikely(offset >= MAX_ORDER_NR_PAGES)) + return nth_page(base, offset); + return base + offset; +} + +/* + * Iterator over all subpages within the maximally aligned gigantic + * page 'base'. Handle any discontiguity in the mem_map. 
+ */ +static inline struct page *mem_map_next(struct page *iter, + struct page *base, int offset) +{ + if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) { + unsigned long pfn = page_to_pfn(base) + offset; + if (!pfn_valid(pfn)) + return NULL; + return pfn_to_page(pfn); + } + return iter + 1; +} + +/* Memory initialisation debug and verification */ +enum mminit_level { + MMINIT_WARNING, + MMINIT_VERIFY, + MMINIT_TRACE +}; + +#ifdef CONFIG_DEBUG_MEMORY_INIT + +extern int mminit_loglevel; + +#define mminit_dprintk(level, prefix, fmt, arg...) \ +do { \ + if (level < mminit_loglevel) { \ + if (level <= MMINIT_WARNING) \ + pr_warn("mminit::" prefix " " fmt, ##arg); \ + else \ + printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ + } \ +} while (0) + +extern void mminit_verify_pageflags_layout(void); +extern void mminit_verify_zonelist(void); +#else + +static inline void mminit_dprintk(enum mminit_level level, + const char *prefix, const char *fmt, ...) +{ +} + +static inline void mminit_verify_pageflags_layout(void) +{ +} + +static inline void mminit_verify_zonelist(void) +{ +} +#endif /* CONFIG_DEBUG_MEMORY_INIT */ + +/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */ +#if defined(CONFIG_SPARSEMEM) +extern void mminit_validate_memmodel_limits(unsigned long *start_pfn, + unsigned long *end_pfn); +#else +static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn, + unsigned long *end_pfn) +{ +} +#endif /* CONFIG_SPARSEMEM */ + +#define NODE_RECLAIM_NOSCAN -2 +#define NODE_RECLAIM_FULL -1 +#define NODE_RECLAIM_SOME 0 +#define NODE_RECLAIM_SUCCESS 1 + +#ifdef CONFIG_NUMA +extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int); +#else +static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, + unsigned int order) +{ + return NODE_RECLAIM_NOSCAN; +} +#endif + +extern int hwpoison_filter(struct page *p); + +extern u32 hwpoison_filter_dev_major; +extern u32 hwpoison_filter_dev_minor; +extern u64 
hwpoison_filter_flags_mask; +extern u64 hwpoison_filter_flags_value; +extern u64 hwpoison_filter_memcg; +extern u32 hwpoison_filter_enable; + +extern unsigned long __must_check vm_mmap_pgoff(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); + +extern void set_pageblock_order(void); +unsigned long reclaim_clean_pages_from_list(struct zone *zone, + struct list_head *page_list); +/* The ALLOC_WMARK bits are used as an index to zone->watermark */ +#define ALLOC_WMARK_MIN WMARK_MIN +#define ALLOC_WMARK_LOW WMARK_LOW +#define ALLOC_WMARK_HIGH WMARK_HIGH +#define ALLOC_NO_WATERMARKS 0x04 /* don't check watermarks at all */ + +/* Mask to get the watermark bits */ +#define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1) + +/* + * Only MMU archs have async oom victim reclaim - aka oom_reaper so we + * cannot assume a reduced access to memory reserves is sufficient for + * !MMU + */ +#ifdef CONFIG_MMU +#define ALLOC_OOM 0x08 +#else +#define ALLOC_OOM ALLOC_NO_WATERMARKS +#endif + +#define ALLOC_HARDER 0x10 /* try to alloc harder */ +#define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ +#define ALLOC_CPUSET 0x40 /* check for correct cpuset */ +#define ALLOC_CMA 0x80 /* allow allocations from CMA areas */ +#ifdef CONFIG_ZONE_DMA32 +#define ALLOC_NOFRAGMENT 0x100 /* avoid mixing pageblock types */ +#else +#define ALLOC_NOFRAGMENT 0x0 +#endif +#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */ + +enum ttu_flags; +struct tlbflush_unmap_batch; + + +/* + * only for MM internal work items which do not depend on + * any allocations or locks which might depend on allocations + */ +extern struct workqueue_struct *mm_percpu_wq; + +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +void try_to_unmap_flush(void); +void try_to_unmap_flush_dirty(void); +void flush_tlb_batched_pending(struct mm_struct *mm); +#else +static inline void try_to_unmap_flush(void) +{ +} +static inline void try_to_unmap_flush_dirty(void) +{ +} +static inline void 
flush_tlb_batched_pending(struct mm_struct *mm) +{ +} +#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ + +extern const struct trace_print_flags pageflag_names[]; +extern const struct trace_print_flags vmaflag_names[]; +extern const struct trace_print_flags gfpflag_names[]; + +static inline bool is_migrate_highatomic(enum migratetype migratetype) +{ + return migratetype == MIGRATE_HIGHATOMIC; +} + +static inline bool is_migrate_highatomic_page(struct page *page) +{ + return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC; +} + +void setup_zone_pageset(struct zone *zone); +extern struct page *alloc_new_node_page(struct page *page, unsigned long node); +#endif /* __MM_INTERNAL_H */ From d378f71bc4724e967b0dd20d550207eac33939b6 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 17:51:00 +0900 Subject: [PATCH 15/37] Feat: adding prefix pxt4_* in all functions --- pxt4/mm/filemap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c index ad0a07746..feeda5c40 100644 --- a/pxt4/mm/filemap.c +++ b/pxt4/mm/filemap.c @@ -116,7 +116,7 @@ * ->tasklist_lock (memory_failure, collect_procs_ao) */ -ssize_t generic_perform_write(struct file *file, +ssize_t pxt4_generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) { struct address_space *mapping = file->f_mapping; @@ -221,7 +221,7 @@ EXPORT_SYMBOL(generic_perform_write); * * number of bytes written, even for truncated writes * * negative error code if no data has been written at all */ -ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +ssize_t pxt4___generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; From 41cb5150b055be7ab3cbb879f95870942c47fec9 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 17:51:59 +0900 Subject: [PATCH 16/37] Feat: removing 
export symbol macros --- pxt4/mm/filemap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c index feeda5c40..5c7764e0a 100644 --- a/pxt4/mm/filemap.c +++ b/pxt4/mm/filemap.c @@ -198,7 +198,6 @@ ssize_t pxt4_generic_perform_write(struct file *file, return written ? written : status; } -EXPORT_SYMBOL(generic_perform_write); /** * __generic_file_write_iter - write data to a file @@ -294,5 +293,3 @@ ssize_t pxt4___generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from current->backing_dev_info = NULL; return written ? written : err; } -EXPORT_SYMBOL(__generic_file_write_iter); - From 91385ee90946a842deff1f8b456f4287d76f8bcd Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 17:57:56 +0900 Subject: [PATCH 17/37] Feat: commenting redundant codes again and adding pxt4_ prefix --- pxt4/mm/filemap.c | 3225 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 3222 insertions(+), 3 deletions(-) diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c index 5c7764e0a..5e1079eba 100644 --- a/pxt4/mm/filemap.c +++ b/pxt4/mm/filemap.c @@ -43,8 +43,8 @@ #include #include "internal.h" -#define CREATE_TRACE_POINTS -#include +//#define CREATE_TRACE_POINTS +//#include /* * FIXME: remove all knowledge of the buffer layer from the core VM @@ -115,6 +115,3158 @@ * ->i_mmap_rwsem * ->tasklist_lock (memory_failure, collect_procs_ao) */ +# if 0 +static void page_cache_delete(struct address_space *mapping, + struct page *page, void *shadow) +{ + XA_STATE(xas, &mapping->i_pages, page->index); + unsigned int nr = 1; + + mapping_set_update(&xas, mapping); + + /* hugetlb pages are represented by a single entry in the xarray */ + if (!PageHuge(page)) { + xas_set_order(&xas, page->index, compound_order(page)); + nr = compound_nr(page); + } + + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(nr != 1 && shadow, page); + + xas_store(&xas, shadow); + 
xas_init_marks(&xas); + + page->mapping = NULL; + /* Leave page->index set: truncation lookup relies upon it */ + + if (shadow) { + mapping->nrexceptional += nr; + /* + * Make sure the nrexceptional update is committed before + * the nrpages update so that final truncate racing + * with reclaim does not see both counters 0 at the + * same time and miss a shadow entry. + */ + smp_wmb(); + } + mapping->nrpages -= nr; +} + +static void unaccount_page_cache_page(struct address_space *mapping, + struct page *page) +{ + int nr; + + /* + * if we're uptodate, flush out into the cleancache, otherwise + * invalidate any existing cleancache entries. We can't leave + * stale data around in the cleancache once our page is gone + */ + if (PageUptodate(page) && PageMappedToDisk(page)) + cleancache_put_page(page); + else + cleancache_invalidate_page(mapping, page); + + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(page_mapped(page), page); + if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { + int mapcount; + + pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", + current->comm, page_to_pfn(page)); + dump_page(page, "still mapped when deleted"); + dump_stack(); + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); + + mapcount = page_mapcount(page); + if (mapping_exiting(mapping) && + page_count(page) >= mapcount + 2) { + /* + * All vmas have already been torn down, so it's + * a good bet that actually the page is unmapped, + * and we'd prefer not to leak it: if we're wrong, + * some other bad page check should catch it later. + */ + page_mapcount_reset(page); + page_ref_sub(page, mapcount); + } + } + + /* hugetlb pages do not participate in page cache accounting. 
*/ + if (PageHuge(page)) + return; + + nr = hpage_nr_pages(page); + + __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); + if (PageSwapBacked(page)) { + __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); + if (PageTransHuge(page)) + __dec_node_page_state(page, NR_SHMEM_THPS); + } else if (PageTransHuge(page)) { + __dec_node_page_state(page, NR_FILE_THPS); + filemap_nr_thps_dec(mapping); + } + + /* + * At this point page must be either written or cleaned by + * truncate. Dirty page here signals a bug and loss of + * unwritten data. + * + * This fixes dirty accounting after removing the page entirely + * but leaves PageDirty set: it has no effect for truncated + * page and anyway will be cleared before returning page into + * buddy allocator. + */ + if (WARN_ON_ONCE(PageDirty(page))) + account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); +} + +/* + * Delete a page from the page cache and free it. Caller has to make + * sure the page is locked and that nobody else uses it - or that usage + * is safe. The caller must hold the i_pages lock. + */ +void __delete_from_page_cache(struct page *page, void *shadow) +{ + struct address_space *mapping = page->mapping; + + trace_mm_filemap_delete_from_page_cache(page); + + unaccount_page_cache_page(mapping, page); + page_cache_delete(mapping, page, shadow); +} + +static void page_cache_free_page(struct address_space *mapping, + struct page *page) +{ + void (*freepage)(struct page *); + + freepage = mapping->a_ops->freepage; + if (freepage) + freepage(page); + + if (PageTransHuge(page) && !PageHuge(page)) { + page_ref_sub(page, HPAGE_PMD_NR); + VM_BUG_ON_PAGE(page_count(page) <= 0, page); + } else { + put_page(page); + } +} + +/** + * delete_from_page_cache - delete page from page cache + * @page: the page which the kernel is trying to remove from page cache + * + * This must be called only on pages that have been verified to be in the page + * cache and locked. 
It will never put the page into the free list, the caller + * has a reference on the page. + */ +void delete_from_page_cache(struct page *page) +{ + struct address_space *mapping = page_mapping(page); + unsigned long flags; + + BUG_ON(!PageLocked(page)); + xa_lock_irqsave(&mapping->i_pages, flags); + __delete_from_page_cache(page, NULL); + xa_unlock_irqrestore(&mapping->i_pages, flags); + + page_cache_free_page(mapping, page); +} +EXPORT_SYMBOL(delete_from_page_cache); + +/* + * page_cache_delete_batch - delete several pages from page cache + * @mapping: the mapping to which pages belong + * @pvec: pagevec with pages to delete + * + * The function walks over mapping->i_pages and removes pages passed in @pvec + * from the mapping. The function expects @pvec to be sorted by page index + * and is optimised for it to be dense. + * It tolerates holes in @pvec (mapping entries at those indices are not + * modified). The function expects only THP head pages to be present in the + * @pvec. + * + * The function expects the i_pages lock to be held. + */ +static void page_cache_delete_batch(struct address_space *mapping, + struct pagevec *pvec) +{ + XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); + int total_pages = 0; + int i = 0; + struct page *page; + + mapping_set_update(&xas, mapping); + xas_for_each(&xas, page, ULONG_MAX) { + if (i >= pagevec_count(pvec)) + break; + + /* A swap/dax/shadow entry got inserted? Skip it. */ + if (xa_is_value(page)) + continue; + /* + * A page got inserted in our range? Skip it. We have our + * pages locked so they are protected from being removed. + * If we see a page whose index is higher than ours, it + * means our page has been removed, which shouldn't be + * possible because we're holding the PageLock. 
+ */ + if (page != pvec->pages[i]) { + VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, + page); + continue; + } + + WARN_ON_ONCE(!PageLocked(page)); + + if (page->index == xas.xa_index) + page->mapping = NULL; + /* Leave page->index set: truncation lookup relies on it */ + + /* + * Move to the next page in the vector if this is a regular + * page or the index is of the last sub-page of this compound + * page. + */ + if (page->index + compound_nr(page) - 1 == xas.xa_index) + i++; + xas_store(&xas, NULL); + total_pages++; + } + mapping->nrpages -= total_pages; +} + +void delete_from_page_cache_batch(struct address_space *mapping, + struct pagevec *pvec) +{ + int i; + unsigned long flags; + + if (!pagevec_count(pvec)) + return; + + xa_lock_irqsave(&mapping->i_pages, flags); + for (i = 0; i < pagevec_count(pvec); i++) { + trace_mm_filemap_delete_from_page_cache(pvec->pages[i]); + + unaccount_page_cache_page(mapping, pvec->pages[i]); + } + page_cache_delete_batch(mapping, pvec); + xa_unlock_irqrestore(&mapping->i_pages, flags); + + for (i = 0; i < pagevec_count(pvec); i++) + page_cache_free_page(mapping, pvec->pages[i]); +} + +int filemap_check_errors(struct address_space *mapping) +{ + int ret = 0; + /* Check for outstanding write errors */ + if (test_bit(AS_ENOSPC, &mapping->flags) && + test_and_clear_bit(AS_ENOSPC, &mapping->flags)) + ret = -ENOSPC; + if (test_bit(AS_EIO, &mapping->flags) && + test_and_clear_bit(AS_EIO, &mapping->flags)) + ret = -EIO; + return ret; +} +EXPORT_SYMBOL(filemap_check_errors); + +static int filemap_check_and_keep_errors(struct address_space *mapping) +{ + /* Check for outstanding write errors */ + if (test_bit(AS_EIO, &mapping->flags)) + return -EIO; + if (test_bit(AS_ENOSPC, &mapping->flags)) + return -ENOSPC; + return 0; +} + +/** + * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range + * @mapping: address space structure to write + * @start: offset in bytes where the range starts + * @end: offset in bytes 
where the range ends (inclusive) + * @sync_mode: enable synchronous operation + * + * Start writeback against all of a mapping's dirty pages that lie + * within the byte offsets inclusive. + * + * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as + * opposed to a regular memory cleansing writeback. The difference between + * these two operations is that if a dirty page/buffer is encountered, it must + * be waited upon, and not just skipped over. + * + * Return: %0 on success, negative error code otherwise. + */ +int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end, int sync_mode) +{ + int ret; + struct writeback_control wbc = { + .sync_mode = sync_mode, + .nr_to_write = LONG_MAX, + .range_start = start, + .range_end = end, + }; + + if (!mapping_cap_writeback_dirty(mapping) || + !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) + return 0; + + wbc_attach_fdatawrite_inode(&wbc, mapping->host); + ret = do_writepages(mapping, &wbc); + wbc_detach_inode(&wbc); + return ret; +} + +static inline int __filemap_fdatawrite(struct address_space *mapping, + int sync_mode) +{ + return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); +} + +int filemap_fdatawrite(struct address_space *mapping) +{ + return __filemap_fdatawrite(mapping, WB_SYNC_ALL); +} +EXPORT_SYMBOL(filemap_fdatawrite); + +int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, + loff_t end) +{ + return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); +} +EXPORT_SYMBOL(filemap_fdatawrite_range); + +/** + * filemap_flush - mostly a non-blocking flush + * @mapping: target address_space + * + * This is a mostly non-blocking flush. Not suitable for data-integrity + * purposes - I/O may not be started against all dirty pages. + * + * Return: %0 on success, negative error code otherwise. 
+ */ +int filemap_flush(struct address_space *mapping) +{ + return __filemap_fdatawrite(mapping, WB_SYNC_NONE); +} +EXPORT_SYMBOL(filemap_flush); + +/** + * filemap_range_has_page - check if a page exists in range. + * @mapping: address space within which to check + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) + * + * Find at least one page in the range supplied, usually used to check if + * direct writing in this range will trigger a writeback. + * + * Return: %true if at least one page exists in the specified range, + * %false otherwise. + */ +bool filemap_range_has_page(struct address_space *mapping, + loff_t start_byte, loff_t end_byte) +{ + struct page *page; + XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); + pgoff_t max = end_byte >> PAGE_SHIFT; + + if (end_byte < start_byte) + return false; + + rcu_read_lock(); + for (;;) { + page = xas_find(&xas, max); + if (xas_retry(&xas, page)) + continue; + /* Shadow entries don't count */ + if (xa_is_value(page)) + continue; + /* + * We don't need to try to pin this page; we're about to + * release the RCU lock anyway. It is enough to know that + * there was a page here recently. 
+ */ + break; + } + rcu_read_unlock(); + + return page != NULL; +} +EXPORT_SYMBOL(filemap_range_has_page); + +static void __filemap_fdatawait_range(struct address_space *mapping, + loff_t start_byte, loff_t end_byte) +{ + pgoff_t index = start_byte >> PAGE_SHIFT; + pgoff_t end = end_byte >> PAGE_SHIFT; + struct pagevec pvec; + int nr_pages; + + if (end_byte < start_byte) + return; + + pagevec_init(&pvec); + while (index <= end) { + unsigned i; + + nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, + end, PAGECACHE_TAG_WRITEBACK); + if (!nr_pages) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + wait_on_page_writeback(page); + ClearPageError(page); + } + pagevec_release(&pvec); + cond_resched(); + } +} + +/** + * filemap_fdatawait_range - wait for writeback to complete + * @mapping: address space structure to wait for + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) + * + * Walk the list of under-writeback pages of the given address space + * in the given range and wait for all of them. Check error status of + * the address space and return it. + * + * Since the error status of the address space is cleared by this function, + * callers are responsible for checking the return value and handling and/or + * reporting the error. + * + * Return: error status of the address space. 
+ */ +int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, + loff_t end_byte) +{ + __filemap_fdatawait_range(mapping, start_byte, end_byte); + return filemap_check_errors(mapping); +} +EXPORT_SYMBOL(filemap_fdatawait_range); + +/** + * filemap_fdatawait_range_keep_errors - wait for writeback to complete + * @mapping: address space structure to wait for + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) + * + * Walk the list of under-writeback pages of the given address space in the + * given range and wait for all of them. Unlike filemap_fdatawait_range(), + * this function does not clear error status of the address space. + * + * Use this function if callers don't handle errors themselves. Expected + * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), + * fsfreeze(8) + */ +int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte) +{ + __filemap_fdatawait_range(mapping, start_byte, end_byte); + return filemap_check_and_keep_errors(mapping); +} +EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors); + +/** + * file_fdatawait_range - wait for writeback to complete + * @file: file pointing to address space structure to wait for + * @start_byte: offset in bytes where the range starts + * @end_byte: offset in bytes where the range ends (inclusive) + * + * Walk the list of under-writeback pages of the address space that file + * refers to, in the given range and wait for all of them. Check error + * status of the address space vs. the file->f_wb_err cursor and return it. + * + * Since the error status of the file is advanced by this function, + * callers are responsible for checking the return value and handling and/or + * reporting the error. + * + * Return: error status of the address space vs. the file->f_wb_err cursor. 
+ */ +int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte) +{ + struct address_space *mapping = file->f_mapping; + + __filemap_fdatawait_range(mapping, start_byte, end_byte); + return file_check_and_advance_wb_err(file); +} +EXPORT_SYMBOL(file_fdatawait_range); + +/** + * filemap_fdatawait_keep_errors - wait for writeback without clearing errors + * @mapping: address space structure to wait for + * + * Walk the list of under-writeback pages of the given address space + * and wait for all of them. Unlike filemap_fdatawait(), this function + * does not clear error status of the address space. + * + * Use this function if callers don't handle errors themselves. Expected + * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2), + * fsfreeze(8) + * + * Return: error status of the address space. + */ +int filemap_fdatawait_keep_errors(struct address_space *mapping) +{ + __filemap_fdatawait_range(mapping, 0, LLONG_MAX); + return filemap_check_and_keep_errors(mapping); +} +EXPORT_SYMBOL(filemap_fdatawait_keep_errors); + +/* Returns true if writeback might be needed or already in progress. */ +static bool mapping_needs_writeback(struct address_space *mapping) +{ + if (dax_mapping(mapping)) + return mapping->nrexceptional; + + return mapping->nrpages; +} + +int filemap_write_and_wait(struct address_space *mapping) +{ + int err = 0; + + if (mapping_needs_writeback(mapping)) { + err = filemap_fdatawrite(mapping); + /* + * Even if the above returned error, the pages may be + * written partially (e.g. -ENOSPC), so we wait for it. + * But the -EIO is special case, it may indicate the worst + * thing (e.g. bug) happened, so we avoid waiting for it. 
+ */ + if (err != -EIO) { + int err2 = filemap_fdatawait(mapping); + if (!err) + err = err2; + } else { + /* Clear any previously stored errors */ + filemap_check_errors(mapping); + } + } else { + err = filemap_check_errors(mapping); + } + return err; +} +EXPORT_SYMBOL(filemap_write_and_wait); + +/** + * filemap_write_and_wait_range - write out & wait on a file range + * @mapping: the address_space for the pages + * @lstart: offset in bytes where the range starts + * @lend: offset in bytes where the range ends (inclusive) + * + * Write out and wait upon file offsets lstart->lend, inclusive. + * + * Note that @lend is inclusive (describes the last byte to be written) so + * that this function can be used to write to the very end-of-file (end = -1). + * + * Return: error status of the address space. + */ +int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend) +{ + int err = 0; + + if (mapping_needs_writeback(mapping)) { + err = __filemap_fdatawrite_range(mapping, lstart, lend, + WB_SYNC_ALL); + /* See comment of filemap_write_and_wait() */ + if (err != -EIO) { + int err2 = filemap_fdatawait_range(mapping, + lstart, lend); + if (!err) + err = err2; + } else { + /* Clear any previously stored errors */ + filemap_check_errors(mapping); + } + } else { + err = filemap_check_errors(mapping); + } + return err; +} +EXPORT_SYMBOL(filemap_write_and_wait_range); + +void __filemap_set_wb_err(struct address_space *mapping, int err) +{ + errseq_t eseq = errseq_set(&mapping->wb_err, err); + + trace_filemap_set_wb_err(mapping, eseq); +} +EXPORT_SYMBOL(__filemap_set_wb_err); + +/** + * file_check_and_advance_wb_err - report wb error (if any) that was previously + * and advance wb_err to current one + * @file: struct file on which the error is being reported + * + * When userland calls fsync (or something like nfsd does the equivalent), we + * want to report any writeback errors that occurred since the last fsync (or + * since the file was opened 
if there haven't been any). + * + * Grab the wb_err from the mapping. If it matches what we have in the file, + * then just quickly return 0. The file is all caught up. + * + * If it doesn't match, then take the mapping value, set the "seen" flag in + * it and try to swap it into place. If it works, or another task beat us + * to it with the new value, then update the f_wb_err and return the error + * portion. The error at this point must be reported via proper channels + * (a'la fsync, or NFS COMMIT operation, etc.). + * + * While we handle mapping->wb_err with atomic operations, the f_wb_err + * value is protected by the f_lock since we must ensure that it reflects + * the latest value swapped in for this file descriptor. + * + * Return: %0 on success, negative error code otherwise. + */ +int file_check_and_advance_wb_err(struct file *file) +{ + int err = 0; + errseq_t old = READ_ONCE(file->f_wb_err); + struct address_space *mapping = file->f_mapping; + + /* Locklessly handle the common case where nothing has changed */ + if (errseq_check(&mapping->wb_err, old)) { + /* Something changed, must use slow path */ + spin_lock(&file->f_lock); + old = file->f_wb_err; + err = errseq_check_and_advance(&mapping->wb_err, + &file->f_wb_err); + trace_file_check_and_advance_wb_err(file, old); + spin_unlock(&file->f_lock); + } + + /* + * We're mostly using this function as a drop in replacement for + * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect + * that the legacy code would have had on these flags. + */ + clear_bit(AS_EIO, &mapping->flags); + clear_bit(AS_ENOSPC, &mapping->flags); + return err; +} +EXPORT_SYMBOL(file_check_and_advance_wb_err); + +/** + * file_write_and_wait_range - write out & wait on a file range + * @file: file pointing to address_space with pages + * @lstart: offset in bytes where the range starts + * @lend: offset in bytes where the range ends (inclusive) + * + * Write out and wait upon file offsets lstart->lend, inclusive. 
+ * + * Note that @lend is inclusive (describes the last byte to be written) so + * that this function can be used to write to the very end-of-file (end = -1). + * + * After writing out and waiting on the data, we check and advance the + * f_wb_err cursor to the latest value, and return any errors detected there. + * + * Return: %0 on success, negative error code otherwise. + */ +int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend) +{ + int err = 0, err2; + struct address_space *mapping = file->f_mapping; + + if (mapping_needs_writeback(mapping)) { + err = __filemap_fdatawrite_range(mapping, lstart, lend, + WB_SYNC_ALL); + /* See comment of filemap_write_and_wait() */ + if (err != -EIO) + __filemap_fdatawait_range(mapping, lstart, lend); + } + err2 = file_check_and_advance_wb_err(file); + if (!err) + err = err2; + return err; +} +EXPORT_SYMBOL(file_write_and_wait_range); + +/** + * replace_page_cache_page - replace a pagecache page with a new one + * @old: page to be replaced + * @new: page to replace with + * @gfp_mask: allocation mode + * + * This function replaces a page in the pagecache with a new one. On + * success it acquires the pagecache reference for the new page and + * drops it for the old page. Both the old and new pages must be + * locked. This function does not add the new page to the LRU, the + * caller must do that. + * + * The remove + add is atomic. This function cannot fail. 
+ * + * Return: %0 + */ +int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) +{ + struct address_space *mapping = old->mapping; + void (*freepage)(struct page *) = mapping->a_ops->freepage; + pgoff_t offset = old->index; + XA_STATE(xas, &mapping->i_pages, offset); + unsigned long flags; + + VM_BUG_ON_PAGE(!PageLocked(old), old); + VM_BUG_ON_PAGE(!PageLocked(new), new); + VM_BUG_ON_PAGE(new->mapping, new); + + get_page(new); + new->mapping = mapping; + new->index = offset; + + xas_lock_irqsave(&xas, flags); + xas_store(&xas, new); + + old->mapping = NULL; + /* hugetlb pages do not participate in page cache accounting. */ + if (!PageHuge(old)) + __dec_node_page_state(new, NR_FILE_PAGES); + if (!PageHuge(new)) + __inc_node_page_state(new, NR_FILE_PAGES); + if (PageSwapBacked(old)) + __dec_node_page_state(new, NR_SHMEM); + if (PageSwapBacked(new)) + __inc_node_page_state(new, NR_SHMEM); + xas_unlock_irqrestore(&xas, flags); + mem_cgroup_migrate(old, new); + if (freepage) + freepage(old); + put_page(old); + + return 0; +} +EXPORT_SYMBOL_GPL(replace_page_cache_page); + +noinline int __add_to_page_cache_locked(struct page *page, + struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask, + void **shadowp) +{ + XA_STATE(xas, &mapping->i_pages, offset); + int huge = PageHuge(page); + struct mem_cgroup *memcg; + int error; + + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageSwapBacked(page), page); + mapping_set_update(&xas, mapping); + + if (!huge) { + error = mem_cgroup_try_charge(page, current->mm, + gfp_mask, &memcg, false); + if (error) + return error; + } + + get_page(page); + page->mapping = mapping; + page->index = offset; + gfp_mask &= GFP_RECLAIM_MASK; + + do { + unsigned int order = xa_get_order(xas.xa, xas.xa_index); + void *entry, *old = NULL; + + if (order > thp_order(page)) + xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), + order, gfp_mask); + xas_lock_irq(&xas); + xas_for_each_conflict(&xas, entry) { + 
old = entry; + if (!xa_is_value(entry)) { + xas_set_err(&xas, -EEXIST); + goto unlock; + } + } + + if (old) { + if (shadowp) + *shadowp = old; + /* entry may have been split before we acquired lock */ + order = xa_get_order(xas.xa, xas.xa_index); + if (order > thp_order(page)) { + xas_split(&xas, old, order); + xas_reset(&xas); + } + } + + xas_store(&xas, page); + if (xas_error(&xas)) + goto unlock; + + if (old) + mapping->nrexceptional--; + mapping->nrpages++; + + /* hugetlb pages do not participate in page cache accounting */ + if (!huge) + __inc_node_page_state(page, NR_FILE_PAGES); +unlock: + xas_unlock_irq(&xas); + } while (xas_nomem(&xas, gfp_mask)); + + if (xas_error(&xas)) + goto error; + + if (!huge) + mem_cgroup_commit_charge(page, memcg, false, false); + trace_mm_filemap_add_to_page_cache(page); + return 0; +error: + page->mapping = NULL; + /* Leave page->index set: truncation relies upon it */ + if (!huge) + mem_cgroup_cancel_charge(page, memcg, false); + put_page(page); + return xas_error(&xas); +} +ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO); + +/** + * add_to_page_cache_locked - add a locked page to the pagecache + * @page: page to add + * @mapping: the page's address_space + * @offset: page index + * @gfp_mask: page allocation mode + * + * This function is used to add a page to the pagecache. It must be locked. + * This function does not add the page to the LRU. The caller must do that. + * + * Return: %0 on success, negative error code otherwise. 
+ */ +int add_to_page_cache_locked(struct page *page, struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask) +{ + return __add_to_page_cache_locked(page, mapping, offset, + gfp_mask, NULL); +} +EXPORT_SYMBOL(add_to_page_cache_locked); + +int add_to_page_cache_lru(struct page *page, struct address_space *mapping, + pgoff_t offset, gfp_t gfp_mask) +{ + void *shadow = NULL; + int ret; + + __SetPageLocked(page); + ret = __add_to_page_cache_locked(page, mapping, offset, + gfp_mask, &shadow); + if (unlikely(ret)) + __ClearPageLocked(page); + else { + /* + * The page might have been evicted from cache only + * recently, in which case it should be activated like + * any other repeatedly accessed page. + * The exception is pages getting rewritten; evicting other + * data from the working set, only to cache data that will + * get overwritten with something else, is a waste of memory. + */ + WARN_ON_ONCE(PageActive(page)); + if (!(gfp_mask & __GFP_WRITE) && shadow) + workingset_refault(page, shadow); + lru_cache_add(page); + } + return ret; +} +EXPORT_SYMBOL_GPL(add_to_page_cache_lru); + +#ifdef CONFIG_NUMA +struct page *__page_cache_alloc(gfp_t gfp) +{ + int n; + struct page *page; + + if (cpuset_do_page_mem_spread()) { + unsigned int cpuset_mems_cookie; + do { + cpuset_mems_cookie = read_mems_allowed_begin(); + n = cpuset_mem_spread_node(); + page = __alloc_pages_node(n, gfp, 0); + } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); + + return page; + } + return alloc_pages(gfp, 0); +} +EXPORT_SYMBOL(__page_cache_alloc); +#endif + +/* + * In order to wait for pages to become available there must be + * waitqueues associated with pages. 
By using a hash table of + * waitqueues where the bucket discipline is to maintain all + * waiters on the same queue and wake all when any of the pages + * become available, and for the woken contexts to check to be + * sure the appropriate page became available, this saves space + * at a cost of "thundering herd" phenomena during rare hash + * collisions. + */ +#define PAGE_WAIT_TABLE_BITS 8 +#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) +static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; + +static wait_queue_head_t *page_waitqueue(struct page *page) +{ + return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; +} + +void __init pagecache_init(void) +{ + int i; + + for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) + init_waitqueue_head(&page_wait_table[i]); + + page_writeback_init(); +} + +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) +{ + struct wait_page_key *key = arg; + struct wait_page_queue *wait_page + = container_of(wait, struct wait_page_queue, wait); + + if (wait_page->page != key->page) + return 0; + key->page_match = 1; + + if (wait_page->bit_nr != key->bit_nr) + return 0; + + /* + * Stop walking if it's locked. + * Is this safe if put_and_wait_on_page_locked() is in use? + * Yes: the waker must hold a reference to this page, and if PG_locked + * has now already been set by another task, that task must also hold + * a reference to the *same usage* of this page; so there is no need + * to walk on to wake even the put_and_wait_on_page_locked() callers. 
+ */ + if (test_bit(key->bit_nr, &key->page->flags)) + return -1; + + return autoremove_wake_function(wait, mode, sync, key); +} + +static void wake_up_page_bit(struct page *page, int bit_nr) +{ + wait_queue_head_t *q = page_waitqueue(page); + struct wait_page_key key; + unsigned long flags; + wait_queue_entry_t bookmark; + + key.page = page; + key.bit_nr = bit_nr; + key.page_match = 0; + + bookmark.flags = 0; + bookmark.private = NULL; + bookmark.func = NULL; + INIT_LIST_HEAD(&bookmark.entry); + + spin_lock_irqsave(&q->lock, flags); + __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); + + while (bookmark.flags & WQ_FLAG_BOOKMARK) { + /* + * Take a breather from holding the lock, + * allow pages that finish wake up asynchronously + * to acquire the lock and remove themselves + * from wait queue + */ + spin_unlock_irqrestore(&q->lock, flags); + cpu_relax(); + spin_lock_irqsave(&q->lock, flags); + __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); + } + + /* + * It is possible for other pages to have collided on the waitqueue + * hash, so in that case check for a page match. That prevents a long- + * term waiter + * + * It is still possible to miss a case here, when we woke page waiters + * and removed them from the waitqueue, but there are still other + * page waiters. + */ + if (!waitqueue_active(q) || !key.page_match) { + ClearPageWaiters(page); + /* + * It's possible to miss clearing Waiters here, when we woke + * our page waiters, but the hashed waitqueue has waiters for + * other pages on it. + * + * That's okay, it's a rare case. The next waker will clear it. 
+ */ + } + spin_unlock_irqrestore(&q->lock, flags); +} + +static void wake_up_page(struct page *page, int bit) +{ + if (!PageWaiters(page)) + return; + wake_up_page_bit(page, bit); +} + +/* + * A choice of three behaviors for wait_on_page_bit_common(): + */ +enum behavior { + EXCLUSIVE, /* Hold ref to page and take the bit when woken, like + * __lock_page() waiting on then setting PG_locked. + */ + SHARED, /* Hold ref to page and check the bit when woken, like + * wait_on_page_writeback() waiting on PG_writeback. + */ + DROP, /* Drop ref to page before wait, no check when woken, + * like put_and_wait_on_page_locked() on PG_locked. + */ +}; + +static inline int wait_on_page_bit_common(wait_queue_head_t *q, + struct page *page, int bit_nr, int state, enum behavior behavior) +{ + struct wait_page_queue wait_page; + wait_queue_entry_t *wait = &wait_page.wait; + bool bit_is_set; + bool thrashing = false; + bool delayacct = false; + unsigned long pflags; + int ret = 0; + + if (bit_nr == PG_locked && + !PageUptodate(page) && PageWorkingset(page)) { + if (!PageSwapBacked(page)) { + delayacct_thrashing_start(); + delayacct = true; + } + psi_memstall_enter(&pflags); + thrashing = true; + } + + init_wait(wait); + wait->flags = behavior == EXCLUSIVE ? 
WQ_FLAG_EXCLUSIVE : 0; + wait->func = wake_page_function; + wait_page.page = page; + wait_page.bit_nr = bit_nr; + + for (;;) { + spin_lock_irq(&q->lock); + + if (likely(list_empty(&wait->entry))) { + __add_wait_queue_entry_tail(q, wait); + SetPageWaiters(page); + } + + set_current_state(state); + + spin_unlock_irq(&q->lock); + + bit_is_set = test_bit(bit_nr, &page->flags); + if (behavior == DROP) + put_page(page); + + if (likely(bit_is_set)) + io_schedule(); + + if (behavior == EXCLUSIVE) { + if (!test_and_set_bit_lock(bit_nr, &page->flags)) + break; + } else if (behavior == SHARED) { + if (!test_bit(bit_nr, &page->flags)) + break; + } + + if (signal_pending_state(state, current)) { + ret = -EINTR; + break; + } + + if (behavior == DROP) { + /* + * We can no longer safely access page->flags: + * even if CONFIG_MEMORY_HOTREMOVE is not enabled, + * there is a risk of waiting forever on a page reused + * for something that keeps it locked indefinitely. + * But best check for -EINTR above before breaking. + */ + break; + } + } + + finish_wait(q, wait); + + if (thrashing) { + if (delayacct) + delayacct_thrashing_end(); + psi_memstall_leave(&pflags); + } + + /* + * A signal could leave PageWaiters set. Clearing it here if + * !waitqueue_active would be possible (by open-coding finish_wait), + * but still fail to catch it in the case of wait hash collision. We + * already can fail to clear wait hash collision cases, so don't + * bother with signals either. 
+ */ + + return ret; +} + +void wait_on_page_bit(struct page *page, int bit_nr) +{ + wait_queue_head_t *q = page_waitqueue(page); + wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); +} +EXPORT_SYMBOL(wait_on_page_bit); + +int wait_on_page_bit_killable(struct page *page, int bit_nr) +{ + wait_queue_head_t *q = page_waitqueue(page); + return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); +} +EXPORT_SYMBOL(wait_on_page_bit_killable); + +/** + * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked + * @page: The page to wait for. + * + * The caller should hold a reference on @page. They expect the page to + * become unlocked relatively soon, but do not wish to hold up migration + * (for example) by holding the reference while waiting for the page to + * come unlocked. After this function returns, the caller should not + * dereference @page. + */ +void put_and_wait_on_page_locked(struct page *page) +{ + wait_queue_head_t *q; + + page = compound_head(page); + q = page_waitqueue(page); + wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); +} + +/** + * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue + * @page: Page defining the wait queue of interest + * @waiter: Waiter to add to the queue + * + * Add an arbitrary @waiter to the wait queue for the nominated @page. + */ +void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) +{ + wait_queue_head_t *q = page_waitqueue(page); + unsigned long flags; + + spin_lock_irqsave(&q->lock, flags); + __add_wait_queue_entry_tail(q, waiter); + SetPageWaiters(page); + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL_GPL(add_page_wait_queue); + +#ifndef clear_bit_unlock_is_negative_byte + +/* + * PG_waiters is the high bit in the same byte as PG_lock. + * + * On x86 (and on many other architectures), we can clear PG_lock and + * test the sign bit at the same time. 
But if the architecture does + not support that special operation, we just do this all by hand + instead. + * + * The read of PG_waiters has to be after (or concurrently with) PG_locked + * being cleared, but a memory barrier should be unnecessary since it is + * in the same byte as PG_locked. + */ +static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) +{ + clear_bit_unlock(nr, mem); + /* smp_mb__after_atomic(); */ + return test_bit(PG_waiters, mem); +} + +#endif + +/** + * unlock_page - unlock a locked page + * @page: the page + * + * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). + * Also wakes sleepers in wait_on_page_writeback() because the wakeup + * mechanism between PageLocked pages and PageWriteback pages is shared. + * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. + * + * Note that this depends on PG_waiters being the sign bit in the byte + * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to + * clear the PG_locked bit and test PG_waiters at the same time fairly + * portably (architectures that do LL/SC can test any bit, while x86 can + * test the sign bit). + */ +void unlock_page(struct page *page) +{ + BUILD_BUG_ON(PG_waiters != 7); + page = compound_head(page); + VM_BUG_ON_PAGE(!PageLocked(page), page); + if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) + wake_up_page_bit(page, PG_locked); +} +EXPORT_SYMBOL(unlock_page); + +/** + * end_page_writeback - end writeback against a page + * @page: the page + */ +void end_page_writeback(struct page *page) +{ + /* + * TestClearPageReclaim could be used here but it is an atomic + * operation and overkill in this particular case. Failing to + * shuffle a page marked for immediate reclaim is too mild to + * justify taking an atomic operation penalty at the end of + * every page writeback.
+ */ + if (PageReclaim(page)) { + ClearPageReclaim(page); + rotate_reclaimable_page(page); + } + + if (!test_clear_page_writeback(page)) + BUG(); + + smp_mb__after_atomic(); + wake_up_page(page, PG_writeback); +} +EXPORT_SYMBOL(end_page_writeback); + +/* + * After completing I/O on a page, call this routine to update the page + * flags appropriately + */ +void page_endio(struct page *page, bool is_write, int err) +{ + if (!is_write) { + if (!err) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + } else { + if (err) { + struct address_space *mapping; + + SetPageError(page); + mapping = page_mapping(page); + if (mapping) + mapping_set_error(mapping, err); + } + end_page_writeback(page); + } +} +EXPORT_SYMBOL_GPL(page_endio); + +/** + * __lock_page - get a lock on the page, assuming we need to sleep to get it + * @__page: the page to lock + */ +void __lock_page(struct page *__page) +{ + struct page *page = compound_head(__page); + wait_queue_head_t *q = page_waitqueue(page); + wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, + EXCLUSIVE); +} +EXPORT_SYMBOL(__lock_page); + +int __lock_page_killable(struct page *__page) +{ + struct page *page = compound_head(__page); + wait_queue_head_t *q = page_waitqueue(page); + return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, + EXCLUSIVE); +} +EXPORT_SYMBOL_GPL(__lock_page_killable); + +/* + * Return values: + * 1 - page is locked; mmap_sem is still held. + * 0 - page is not locked. + * mmap_sem has been released (up_read()), unless flags had both + * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in + * which case mmap_sem is still held. + * + * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1 + * with the page locked and the mmap_sem unperturbed. + */ +int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + if (flags & FAULT_FLAG_ALLOW_RETRY) { + /* + * CAUTION! 
In this case, mmap_sem is not released + * even though return 0. + */ + if (flags & FAULT_FLAG_RETRY_NOWAIT) + return 0; + + up_read(&mm->mmap_sem); + if (flags & FAULT_FLAG_KILLABLE) + wait_on_page_locked_killable(page); + else + wait_on_page_locked(page); + return 0; + } else { + if (flags & FAULT_FLAG_KILLABLE) { + int ret; + + ret = __lock_page_killable(page); + if (ret) { + up_read(&mm->mmap_sem); + return 0; + } + } else + __lock_page(page); + return 1; + } +} + +/** + * page_cache_next_miss() - Find the next gap in the page cache. + * @mapping: Mapping. + * @index: Index. + * @max_scan: Maximum range to search. + * + * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the + * gap with the lowest index. + * + * This function may be called under the rcu_read_lock. However, this will + * not atomically search a snapshot of the cache at a single point in time. + * For example, if a gap is created at index 5, then subsequently a gap is + * created at index 10, page_cache_next_miss covering both indices may + * return 10 if called under the rcu_read_lock. + * + * Return: The index of the gap if found, otherwise an index outside the + * range specified (in which case 'return - index >= max_scan' will be true). + * In the rare case of index wrap-around, 0 will be returned. + */ +pgoff_t page_cache_next_miss(struct address_space *mapping, + pgoff_t index, unsigned long max_scan) +{ + XA_STATE(xas, &mapping->i_pages, index); + + while (max_scan--) { + void *entry = xas_next(&xas); + if (!entry || xa_is_value(entry)) + break; + if (xas.xa_index == 0) + break; + } + + return xas.xa_index; +} +EXPORT_SYMBOL(page_cache_next_miss); + +/** + * page_cache_prev_miss() - Find the previous gap in the page cache. + * @mapping: Mapping. + * @index: Index. + * @max_scan: Maximum range to search. + * + * Search the range [max(index - max_scan + 1, 0), index] for the + * gap with the highest index. + * + * This function may be called under the rcu_read_lock. 
However, this will + * not atomically search a snapshot of the cache at a single point in time. + * For example, if a gap is created at index 10, then subsequently a gap is + * created at index 5, page_cache_prev_miss() covering both indices may + * return 5 if called under the rcu_read_lock. + * + * Return: The index of the gap if found, otherwise an index outside the + * range specified (in which case 'index - return >= max_scan' will be true). + * In the rare case of wrap-around, ULONG_MAX will be returned. + */ +pgoff_t page_cache_prev_miss(struct address_space *mapping, + pgoff_t index, unsigned long max_scan) +{ + XA_STATE(xas, &mapping->i_pages, index); + + while (max_scan--) { + void *entry = xas_prev(&xas); + if (!entry || xa_is_value(entry)) + break; + if (xas.xa_index == ULONG_MAX) + break; + } + + return xas.xa_index; +} +EXPORT_SYMBOL(page_cache_prev_miss); + +/** + * find_get_entry - find and get a page cache entry + * @mapping: the address_space to search + * @offset: the page cache index + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned with an increased refcount. + * + * If the slot holds a shadow entry of a previously evicted page, or a + * swap entry from shmem/tmpfs, it is returned. + * + * Return: the found page or shadow entry, %NULL if nothing is found. + */ +struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) +{ + XA_STATE(xas, &mapping->i_pages, offset); + struct page *page; + + rcu_read_lock(); +repeat: + xas_reset(&xas); + page = xas_load(&xas); + if (xas_retry(&xas, page)) + goto repeat; + /* + * A shadow entry of a recently evicted page, or a swap entry from + * shmem/tmpfs. Return it without attempting to raise page count. + */ + if (!page || xa_is_value(page)) + goto out; + + if (!page_cache_get_speculative(page)) + goto repeat; + + /* + * Has the page moved or been split? + * This is part of the lockless pagecache protocol. 
See + * include/linux/pagemap.h for details. + */ + if (unlikely(page != xas_reload(&xas))) { + put_page(page); + goto repeat; + } + page = find_subpage(page, offset); +out: + rcu_read_unlock(); + + return page; +} +EXPORT_SYMBOL(find_get_entry); + +/** + * find_lock_entry - locate, pin and lock a page cache entry + * @mapping: the address_space to search + * @offset: the page cache index + * + * Looks up the page cache slot at @mapping & @offset. If there is a + * page cache page, it is returned locked and with an increased + * refcount. + * + * If the slot holds a shadow entry of a previously evicted page, or a + * swap entry from shmem/tmpfs, it is returned. + * + * find_lock_entry() may sleep. + * + * Return: the found page or shadow entry, %NULL if nothing is found. + */ +struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) +{ + struct page *page; + +repeat: + page = find_get_entry(mapping, offset); + if (page && !xa_is_value(page)) { + lock_page(page); + /* Has the page been truncated? */ + if (unlikely(page_mapping(page) != mapping)) { + unlock_page(page); + put_page(page); + goto repeat; + } + VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); + } + return page; +} +EXPORT_SYMBOL(find_lock_entry); + +/** + * pagecache_get_page - find and get a page reference + * @mapping: the address_space to search + * @offset: the page index + * @fgp_flags: PCG flags + * @gfp_mask: gfp mask to use for the page cache data page allocation + * + * Looks up the page cache slot at @mapping & @offset. + * + * PCG flags modify how the page is returned. + * + * @fgp_flags can be: + * + * - FGP_ACCESSED: the page will be marked accessed + * - FGP_LOCK: Page is return locked + * - FGP_CREAT: If page is not present then a new page is allocated using + * @gfp_mask and added to the page cache and the VM's LRU + * list. The page is returned locked and with an increased + * refcount. 
+ * - FGP_FOR_MMAP: Similar to FGP_CREAT, only we want to allow the caller to do + * its own locking dance if the page is already in cache, or unlock the page + * before returning if we had to add the page to pagecache. + * + * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even + * if the GFP flags specified for FGP_CREAT are atomic. + * + * If there is a page cache page, it is returned with an increased refcount. + * + * Return: the found page or %NULL otherwise. + */ +struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, + int fgp_flags, gfp_t gfp_mask) +{ + struct page *page; + +repeat: + page = find_get_entry(mapping, offset); + if (xa_is_value(page)) + page = NULL; + if (!page) + goto no_page; + + if (fgp_flags & FGP_LOCK) { + if (fgp_flags & FGP_NOWAIT) { + if (!trylock_page(page)) { + put_page(page); + return NULL; + } + } else { + lock_page(page); + } + + /* Has the page been truncated? */ + if (unlikely(compound_head(page)->mapping != mapping)) { + unlock_page(page); + put_page(page); + goto repeat; + } + VM_BUG_ON_PAGE(page->index != offset, page); + } + + if (fgp_flags & FGP_ACCESSED) + mark_page_accessed(page); + +no_page: + if (!page && (fgp_flags & FGP_CREAT)) { + int err; + if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) + gfp_mask |= __GFP_WRITE; + if (fgp_flags & FGP_NOFS) + gfp_mask &= ~__GFP_FS; + + page = __page_cache_alloc(gfp_mask); + if (!page) + return NULL; + + if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) + fgp_flags |= FGP_LOCK; + + /* Init accessed so avoid atomic mark_page_accessed later */ + if (fgp_flags & FGP_ACCESSED) + __SetPageReferenced(page); + + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); + if (unlikely(err)) { + put_page(page); + page = NULL; + if (err == -EEXIST) + goto repeat; + } + + /* + * add_to_page_cache_lru locks the page, and for mmap we expect + * an unlocked page. 
+ */ + if (page && (fgp_flags & FGP_FOR_MMAP)) + unlock_page(page); + } + + return page; +} +EXPORT_SYMBOL(pagecache_get_page); + +/** + * find_get_entries - gang pagecache lookup + * @mapping: The address_space to search + * @start: The starting page cache index + * @nr_entries: The maximum number of entries + * @entries: Where the resulting entries are placed + * @indices: The cache indices corresponding to the entries in @entries + * + * find_get_entries() will search for and return a group of up to + * @nr_entries entries in the mapping. The entries are placed at + * @entries. find_get_entries() takes a reference against any actual + * pages it returns. + * + * The search returns a group of mapping-contiguous page cache entries + * with ascending indexes. There may be holes in the indices due to + * not-present pages. + * + * Any shadow entries of evicted pages, or swap entries from + * shmem/tmpfs, are included in the returned array. + * + * Return: the number of pages and shadow entries which were found. + */ +unsigned find_get_entries(struct address_space *mapping, + pgoff_t start, unsigned int nr_entries, + struct page **entries, pgoff_t *indices) +{ + XA_STATE(xas, &mapping->i_pages, start); + struct page *page; + unsigned int ret = 0; + + if (!nr_entries) + return 0; + + rcu_read_lock(); + xas_for_each(&xas, page, ULONG_MAX) { + if (xas_retry(&xas, page)) + continue; + /* + * A shadow entry of a recently evicted page, a swap + * entry from shmem/tmpfs or a DAX entry. Return it + * without attempting to raise page count. + */ + if (xa_is_value(page)) + goto export; + + if (!page_cache_get_speculative(page)) + goto retry; + + /* Has the page moved or been split? 
*/ + if (unlikely(page != xas_reload(&xas))) + goto put_page; + page = find_subpage(page, xas.xa_index); + +export: + indices[ret] = xas.xa_index; + entries[ret] = page; + if (++ret == nr_entries) + break; + continue; +put_page: + put_page(page); +retry: + xas_reset(&xas); + } + rcu_read_unlock(); + return ret; +} + +/** + * find_get_pages_range - gang pagecache lookup + * @mapping: The address_space to search + * @start: The starting page index + * @end: The final page index (inclusive) + * @nr_pages: The maximum number of pages + * @pages: Where the resulting pages are placed + * + * find_get_pages_range() will search for and return a group of up to @nr_pages + * pages in the mapping starting at index @start and up to index @end + * (inclusive). The pages are placed at @pages. find_get_pages_range() takes + * a reference against the returned pages. + * + * The search returns a group of mapping-contiguous pages with ascending + * indexes. There may be holes in the indices due to not-present pages. + * We also update @start to index the next page for the traversal. + * + * Return: the number of pages which were found. If this number is + * smaller than @nr_pages, the end of specified range has been + * reached. + */ +unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, + pgoff_t end, unsigned int nr_pages, + struct page **pages) +{ + XA_STATE(xas, &mapping->i_pages, *start); + struct page *page; + unsigned ret = 0; + + if (unlikely(!nr_pages)) + return 0; + + rcu_read_lock(); + xas_for_each(&xas, page, end) { + if (xas_retry(&xas, page)) + continue; + /* Skip over shadow, swap and DAX entries */ + if (xa_is_value(page)) + continue; + + if (!page_cache_get_speculative(page)) + goto retry; + + /* Has the page moved or been split? 
*/ + if (unlikely(page != xas_reload(&xas))) + goto put_page; + + pages[ret] = find_subpage(page, xas.xa_index); + if (++ret == nr_pages) { + *start = xas.xa_index + 1; + goto out; + } + continue; +put_page: + put_page(page); +retry: + xas_reset(&xas); + } + + /* + * We come here when there is no page beyond @end. We take care to not + * overflow the index @start as it confuses some of the callers. This + * breaks the iteration when there is a page at index -1 but that is + * already broken anyway. + */ + if (end == (pgoff_t)-1) + *start = (pgoff_t)-1; + else + *start = end + 1; +out: + rcu_read_unlock(); + + return ret; +} + +/** + * find_get_pages_contig - gang contiguous pagecache lookup + * @mapping: The address_space to search + * @index: The starting page index + * @nr_pages: The maximum number of pages + * @pages: Where the resulting pages are placed + * + * find_get_pages_contig() works exactly like find_get_pages(), except + * that the returned number of pages are guaranteed to be contiguous. + * + * Return: the number of pages which were found. + */ +unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, + unsigned int nr_pages, struct page **pages) +{ + XA_STATE(xas, &mapping->i_pages, index); + struct page *page; + unsigned int ret = 0; + + if (unlikely(!nr_pages)) + return 0; + + rcu_read_lock(); + for (page = xas_load(&xas); page; page = xas_next(&xas)) { + if (xas_retry(&xas, page)) + continue; + /* + * If the entry has been swapped out, we can stop looking. + * No current caller is looking for DAX entries. + */ + if (xa_is_value(page)) + break; + + if (!page_cache_get_speculative(page)) + goto retry; + + /* Has the page moved or been split? 
*/ + if (unlikely(page != xas_reload(&xas))) + goto put_page; + + pages[ret] = find_subpage(page, xas.xa_index); + if (++ret == nr_pages) + break; + continue; +put_page: + put_page(page); +retry: + xas_reset(&xas); + } + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL(find_get_pages_contig); + +/** + * find_get_pages_range_tag - find and return pages in given range matching @tag + * @mapping: the address_space to search + * @index: the starting page index + * @end: The final page index (inclusive) + * @tag: the tag index + * @nr_pages: the maximum number of pages + * @pages: where the resulting pages are placed + * + * Like find_get_pages, except we only return pages which are tagged with + * @tag. We update @index to index the next page for the traversal. + * + * Return: the number of pages which were found. + */ +unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, + pgoff_t end, xa_mark_t tag, unsigned int nr_pages, + struct page **pages) +{ + XA_STATE(xas, &mapping->i_pages, *index); + struct page *page; + unsigned ret = 0; + + if (unlikely(!nr_pages)) + return 0; + + rcu_read_lock(); + xas_for_each_marked(&xas, page, end, tag) { + if (xas_retry(&xas, page)) + continue; + /* + * Shadow entries should never be tagged, but this iteration + * is lockless so there is a window for page reclaim to evict + * a page we saw tagged. Skip over it. + */ + if (xa_is_value(page)) + continue; + + if (!page_cache_get_speculative(page)) + goto retry; + + /* Has the page moved or been split? */ + if (unlikely(page != xas_reload(&xas))) + goto put_page; + + pages[ret] = find_subpage(page, xas.xa_index); + if (++ret == nr_pages) { + *index = xas.xa_index + 1; + goto out; + } + continue; +put_page: + put_page(page); +retry: + xas_reset(&xas); + } + + /* + * We come here when we got to @end. We take care to not overflow the + * index @index as it confuses some of the callers. 
This breaks the + * iteration when there is a page at index -1 but that is already + * broken anyway. + */ + if (end == (pgoff_t)-1) + *index = (pgoff_t)-1; + else + *index = end + 1; +out: + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(find_get_pages_range_tag); + +/* + * CD/DVDs are error prone. When a medium error occurs, the driver may fail + * a _large_ part of the i/o request. Imagine the worst scenario: + * + * ---R__________________________________________B__________ + * ^ reading here ^ bad block(assume 4k) + * + * read(R) => miss => readahead(R...B) => media error => frustrating retries + * => failing the whole request => read(R) => read(R+1) => + * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => + * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => + * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... + * + * It is going insane. Fix it by quickly scaling down the readahead size. + */ +static void shrink_readahead_size_eio(struct file *filp, + struct file_ra_state *ra) +{ + ra->ra_pages /= 4; +} + +/** + * generic_file_buffered_read - generic file read routine + * @iocb: the iocb to read + * @iter: data destination + * @written: already copied + * + * This is a generic file read routine, and uses the + * mapping->a_ops->readpage() function for the actual low-level stuff. + * + * This is really ugly. But the goto's actually try to clarify some + * of the logic when it comes to error handling etc. 
+ * + * Return: + * * total number of bytes copied, including those that were already @written + * * negative error code if nothing was copied + */ +static ssize_t generic_file_buffered_read(struct kiocb *iocb, + struct iov_iter *iter, ssize_t written) +{ + struct file *filp = iocb->ki_filp; + struct address_space *mapping = filp->f_mapping; + struct inode *inode = mapping->host; + struct file_ra_state *ra = &filp->f_ra; + loff_t *ppos = &iocb->ki_pos; + pgoff_t index; + pgoff_t last_index; + pgoff_t prev_index; + unsigned long offset; /* offset into pagecache page */ + unsigned int prev_offset; + int error = 0; + + if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) + return 0; + iov_iter_truncate(iter, inode->i_sb->s_maxbytes); + + index = *ppos >> PAGE_SHIFT; + prev_index = ra->prev_pos >> PAGE_SHIFT; + prev_offset = ra->prev_pos & (PAGE_SIZE-1); + last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; + offset = *ppos & ~PAGE_MASK; + + for (;;) { + struct page *page; + pgoff_t end_index; + loff_t isize; + unsigned long nr, ret; + + cond_resched(); +find_page: + if (fatal_signal_pending(current)) { + error = -EINTR; + goto out; + } + + page = find_get_page(mapping, index); + if (!page) { + if (iocb->ki_flags & IOCB_NOWAIT) + goto would_block; + page_cache_sync_readahead(mapping, + ra, filp, + index, last_index - index); + page = find_get_page(mapping, index); + if (unlikely(page == NULL)) + goto no_cached_page; + } + if (PageReadahead(page)) { + page_cache_async_readahead(mapping, + ra, filp, page, + index, last_index - index); + } + if (!PageUptodate(page)) { + if (iocb->ki_flags & IOCB_NOWAIT) { + put_page(page); + goto would_block; + } + + /* + * See comment in do_read_cache_page on why + * wait_on_page_locked is used to avoid unnecessary + * serialisations and why it's safe.
+ */ + error = wait_on_page_locked_killable(page); + if (unlikely(error)) + goto readpage_error; + if (PageUptodate(page)) + goto page_ok; + + if (inode->i_blkbits == PAGE_SHIFT || + !mapping->a_ops->is_partially_uptodate) + goto page_not_up_to_date; + /* pipes can't handle partially uptodate pages */ + if (unlikely(iov_iter_is_pipe(iter))) + goto page_not_up_to_date; + if (!trylock_page(page)) + goto page_not_up_to_date; + /* Did it get truncated before we got the lock? */ + if (!page->mapping) + goto page_not_up_to_date_locked; + if (!mapping->a_ops->is_partially_uptodate(page, + offset, iter->count)) + goto page_not_up_to_date_locked; + unlock_page(page); + } +page_ok: + /* + * i_size must be checked after we know the page is Uptodate. + * + * Checking i_size after the check allows us to calculate + * the correct value for "nr", which means the zero-filled + * part of the page is not copied back to userspace (unless + * another truncate extends the file - this is desired though). + */ + + isize = i_size_read(inode); + end_index = (isize - 1) >> PAGE_SHIFT; + if (unlikely(!isize || index > end_index)) { + put_page(page); + goto out; + } + + /* nr is the maximum number of bytes to copy from this page */ + nr = PAGE_SIZE; + if (index == end_index) { + nr = ((isize - 1) & ~PAGE_MASK) + 1; + if (nr <= offset) { + put_page(page); + goto out; + } + } + nr = nr - offset; + + /* If users can be writing to this page using arbitrary + * virtual addresses, take care about potential aliasing + * before reading the page on the kernel side. + */ + if (mapping_writably_mapped(mapping)) + flush_dcache_page(page); + + /* + * When a sequential read accesses a page several times, + * only mark it as accessed the first time. + */ + if (prev_index != index || offset != prev_offset) + mark_page_accessed(page); + prev_index = index; + + /* + * Ok, we have the page, and it's up-to-date, so + * now we can copy it to user space... 
+ */ + + ret = copy_page_to_iter(page, offset, nr, iter); + offset += ret; + index += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; + prev_offset = offset; + + put_page(page); + written += ret; + if (!iov_iter_count(iter)) + goto out; + if (ret < nr) { + error = -EFAULT; + goto out; + } + continue; + +page_not_up_to_date: + /* Get exclusive access to the page ... */ + error = lock_page_killable(page); + if (unlikely(error)) + goto readpage_error; + +page_not_up_to_date_locked: + /* Did it get truncated before we got the lock? */ + if (!page->mapping) { + unlock_page(page); + put_page(page); + continue; + } + + /* Did somebody else fill it already? */ + if (PageUptodate(page)) { + unlock_page(page); + goto page_ok; + } + +readpage: + /* + * A previous I/O error may have been due to temporary + * failures, eg. multipath errors. + * PG_error will be set again if readpage fails. + */ + ClearPageError(page); + /* Start the actual read. The read will unlock the page. */ + error = mapping->a_ops->readpage(filp, page); + + if (unlikely(error)) { + if (error == AOP_TRUNCATED_PAGE) { + put_page(page); + error = 0; + goto find_page; + } + goto readpage_error; + } + + if (!PageUptodate(page)) { + error = lock_page_killable(page); + if (unlikely(error)) + goto readpage_error; + if (!PageUptodate(page)) { + if (page->mapping == NULL) { + /* + * invalidate_mapping_pages got it + */ + unlock_page(page); + put_page(page); + goto find_page; + } + unlock_page(page); + shrink_readahead_size_eio(filp, ra); + error = -EIO; + goto readpage_error; + } + unlock_page(page); + } + + goto page_ok; + +readpage_error: + /* UHHUH! A synchronous read error occurred. Report it */ + put_page(page); + goto out; + +no_cached_page: + /* + * Ok, it wasn't cached, so we need to create a new + * page.. 
+ */ + page = page_cache_alloc(mapping); + if (!page) { + error = -ENOMEM; + goto out; + } + error = add_to_page_cache_lru(page, mapping, index, + mapping_gfp_constraint(mapping, GFP_KERNEL)); + if (error) { + put_page(page); + if (error == -EEXIST) { + error = 0; + goto find_page; + } + goto out; + } + goto readpage; + } + +would_block: + error = -EAGAIN; +out: + ra->prev_pos = prev_index; + ra->prev_pos <<= PAGE_SHIFT; + ra->prev_pos |= prev_offset; + + *ppos = ((loff_t)index << PAGE_SHIFT) + offset; + file_accessed(filp); + return written ? written : error; +} + +/** + * generic_file_read_iter - generic filesystem read routine + * @iocb: kernel I/O control block + * @iter: destination for the data read + * + * This is the "read_iter()" routine for all filesystems + * that can use the page cache directly. + * Return: + * * number of bytes copied, even for partial reads + * * negative error code if nothing was read + */ +ssize_t +generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + size_t count = iov_iter_count(iter); + ssize_t retval = 0; + + if (!count) + goto out; /* skip atime */ + + if (iocb->ki_flags & IOCB_DIRECT) { + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + loff_t size; + + size = i_size_read(inode); + if (iocb->ki_flags & IOCB_NOWAIT) { + if (filemap_range_has_page(mapping, iocb->ki_pos, + iocb->ki_pos + count - 1)) + return -EAGAIN; + } else { + retval = filemap_write_and_wait_range(mapping, + iocb->ki_pos, + iocb->ki_pos + count - 1); + if (retval < 0) + goto out; + } + + file_accessed(file); + + retval = mapping->a_ops->direct_IO(iocb, iter); + if (retval >= 0) { + iocb->ki_pos += retval; + count -= retval; + } + iov_iter_revert(iter, count - iov_iter_count(iter)); + + /* + * Btrfs can have a short DIO read if we encounter + * compressed extents, so if there was an error, or if + * we've already read everything we wanted to, or if + * there was a 
short read because we hit EOF, go ahead + * and return. Otherwise fallthrough to buffered io for + * the rest of the read. Buffered reads will not work for + * DAX files, so don't bother trying. + */ + if (retval < 0 || !count || iocb->ki_pos >= size || + IS_DAX(inode)) + goto out; + } + + retval = generic_file_buffered_read(iocb, iter, retval); +out: + return retval; +} +EXPORT_SYMBOL(generic_file_read_iter); + +#ifdef CONFIG_MMU +#define MMAP_LOTSAMISS (100) +/* + * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem + * @vmf - the vm_fault for this fault. + * @page - the page to lock. + * @fpin - the pointer to the file we may pin (or is already pinned). + * + * This works similar to lock_page_or_retry in that it can drop the mmap_sem. + * It differs in that it actually returns the page locked if it returns 1 and 0 + * if it couldn't lock the page. If we did have to drop the mmap_sem then fpin + * will point to the pinned file and needs to be fput()'ed at a later point. + */ +static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, + struct file **fpin) +{ + if (trylock_page(page)) + return 1; + + /* + * NOTE! This will make us return with VM_FAULT_RETRY, but with + * the mmap_sem still held. That's how FAULT_FLAG_RETRY_NOWAIT + * is supposed to work. We have way too many special cases.. + */ + if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) + return 0; + + *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); + if (vmf->flags & FAULT_FLAG_KILLABLE) { + if (__lock_page_killable(page)) { + /* + * We didn't have the right flags to drop the mmap_sem, + * but all fault_handlers only check for fatal signals + * if we return VM_FAULT_RETRY, so we need to drop the + * mmap_sem here and return 0 if we don't have a fpin. 
+ */ + if (*fpin == NULL) + up_read(&vmf->vma->vm_mm->mmap_sem); + return 0; + } + } else + __lock_page(page); + return 1; +} + + +/* + * Synchronous readahead happens when we don't even find a page in the page + * cache at all. We don't want to perform IO under the mmap sem, so if we have + * to drop the mmap sem we return the file that was pinned in order for us to do + * that. If we didn't pin a file then we return NULL. The file that is + * returned needs to be fput()'ed when we're done with it. + */ +static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) +{ + struct file *file = vmf->vma->vm_file; + struct file_ra_state *ra = &file->f_ra; + struct address_space *mapping = file->f_mapping; + struct file *fpin = NULL; + pgoff_t offset = vmf->pgoff; + + /* If we don't want any read-ahead, don't bother */ + if (vmf->vma->vm_flags & VM_RAND_READ) + return fpin; + if (!ra->ra_pages) + return fpin; + + if (vmf->vma->vm_flags & VM_SEQ_READ) { + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + page_cache_sync_readahead(mapping, ra, file, offset, + ra->ra_pages); + return fpin; + } + + /* Avoid banging the cache line if not needed */ + if (ra->mmap_miss < MMAP_LOTSAMISS * 10) + ra->mmap_miss++; + + /* + * Do we miss much more than hit in this file? If so, + * stop bothering with read-ahead. It will only hurt. + */ + if (ra->mmap_miss > MMAP_LOTSAMISS) + return fpin; + + /* + * mmap read-around + */ + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + ra->start = max_t(long, 0, offset - ra->ra_pages / 2); + ra->size = ra->ra_pages; + ra->async_size = ra->ra_pages / 4; + ra_submit(ra, mapping, file); + return fpin; +} + +/* + * Asynchronous readahead happens when we find the page and PG_readahead, + * so we want to possibly extend the readahead further. We return the file that + * was pinned if we have to drop the mmap_sem in order to do IO. 
+ */ +static struct file *do_async_mmap_readahead(struct vm_fault *vmf, + struct page *page) +{ + struct file *file = vmf->vma->vm_file; + struct file_ra_state *ra = &file->f_ra; + struct address_space *mapping = file->f_mapping; + struct file *fpin = NULL; + pgoff_t offset = vmf->pgoff; + + /* If we don't want any read-ahead, don't bother */ + if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) + return fpin; + if (ra->mmap_miss > 0) + ra->mmap_miss--; + if (PageReadahead(page)) { + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + page_cache_async_readahead(mapping, ra, file, + page, offset, ra->ra_pages); + } + return fpin; +} + +/** + * filemap_fault - read in file data for page fault handling + * @vmf: struct vm_fault containing details of the fault + * + * filemap_fault() is invoked via the vma operations vector for a + * mapped memory region to read in file data during a page fault. + * + * The goto's are kind of ugly, but this streamlines the normal case of having + * it in the page cache, and handles the special cases reasonably without + * having a lot of duplicated code. + * + * vma->vm_mm->mmap_sem must be held on entry. + * + * If our return value has VM_FAULT_RETRY set, it's because the mmap_sem + * may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). + * + * If our return value does not have VM_FAULT_RETRY set, the mmap_sem + * has not been released. + * + * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. + * + * Return: bitwise-OR of %VM_FAULT_ codes. 
+ */ +vm_fault_t filemap_fault(struct vm_fault *vmf) +{ + int error; + struct file *file = vmf->vma->vm_file; + struct file *fpin = NULL; + struct address_space *mapping = file->f_mapping; + struct file_ra_state *ra = &file->f_ra; + struct inode *inode = mapping->host; + pgoff_t offset = vmf->pgoff; + pgoff_t max_off; + struct page *page; + vm_fault_t ret = 0; + + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + if (unlikely(offset >= max_off)) + return VM_FAULT_SIGBUS; + + /* + * Do we have something in the page cache already? + */ + page = find_get_page(mapping, offset); + if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { + /* + * We found the page, so try async readahead before + * waiting for the lock. + */ + fpin = do_async_mmap_readahead(vmf, page); + } else if (!page) { + /* No page in the page cache at all */ + count_vm_event(PGMAJFAULT); + count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); + ret = VM_FAULT_MAJOR; + fpin = do_sync_mmap_readahead(vmf); +retry_find: + page = pagecache_get_page(mapping, offset, + FGP_CREAT|FGP_FOR_MMAP, + vmf->gfp_mask); + if (!page) { + if (fpin) + goto out_retry; + return vmf_error(-ENOMEM); + } + } + + if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) + goto out_retry; + + /* Did it get truncated? */ + if (unlikely(compound_head(page)->mapping != mapping)) { + unlock_page(page); + put_page(page); + goto retry_find; + } + VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); + + /* + * We have a locked page in the page cache, now we need to check + * that it's up-to-date. If not, it is going to be due to an error. + */ + if (unlikely(!PageUptodate(page))) + goto page_not_uptodate; + + /* + * We've made it this far and we had to drop our mmap_sem, now is the + * time to return to the upper layer and have it re-find the vma and + * redo the fault. + */ + if (fpin) { + unlock_page(page); + goto out_retry; + } + + /* + * Found the page and have a reference on it. + * We must recheck i_size under page lock. 
+ */ + max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); + if (unlikely(offset >= max_off)) { + unlock_page(page); + put_page(page); + return VM_FAULT_SIGBUS; + } + + vmf->page = page; + return ret | VM_FAULT_LOCKED; + +page_not_uptodate: + /* + * Umm, take care of errors if the page isn't up-to-date. + * Try to re-read it _once_. We do this synchronously, + * because there really aren't any performance issues here + * and we need to check for errors. + */ + ClearPageError(page); + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + error = mapping->a_ops->readpage(file, page); + if (!error) { + wait_on_page_locked(page); + if (!PageUptodate(page)) + error = -EIO; + } + if (fpin) + goto out_retry; + put_page(page); + + if (!error || error == AOP_TRUNCATED_PAGE) + goto retry_find; + + /* Things didn't work out. Return zero to tell the mm layer so. */ + shrink_readahead_size_eio(file, ra); + return VM_FAULT_SIGBUS; + +out_retry: + /* + * We dropped the mmap_sem, we need to return to the fault handler to + * re-find the vma and come back and find our hopefully still populated + * page. + */ + if (page) + put_page(page); + if (fpin) + fput(fpin); + return ret | VM_FAULT_RETRY; +} +EXPORT_SYMBOL(filemap_fault); + +void filemap_map_pages(struct vm_fault *vmf, + pgoff_t start_pgoff, pgoff_t end_pgoff) +{ + struct file *file = vmf->vma->vm_file; + struct address_space *mapping = file->f_mapping; + pgoff_t last_pgoff = start_pgoff; + unsigned long max_idx; + XA_STATE(xas, &mapping->i_pages, start_pgoff); + struct page *page; + + rcu_read_lock(); + xas_for_each(&xas, page, end_pgoff) { + if (xas_retry(&xas, page)) + continue; + if (xa_is_value(page)) + goto next; + + /* + * Check for a locked page first, as a speculative + * reference may adversely influence page migration. + */ + if (PageLocked(page)) + goto next; + if (!page_cache_get_speculative(page)) + goto next; + + /* Has the page moved or been split? 
*/ + if (unlikely(page != xas_reload(&xas))) + goto skip; + page = find_subpage(page, xas.xa_index); + + if (!PageUptodate(page) || + PageReadahead(page) || + PageHWPoison(page)) + goto skip; + if (!trylock_page(page)) + goto skip; + + if (page->mapping != mapping || !PageUptodate(page)) + goto unlock; + + max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); + if (page->index >= max_idx) + goto unlock; + + if (file->f_ra.mmap_miss > 0) + file->f_ra.mmap_miss--; + + vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT; + if (vmf->pte) + vmf->pte += xas.xa_index - last_pgoff; + last_pgoff = xas.xa_index; + if (alloc_set_pte(vmf, NULL, page)) + goto unlock; + unlock_page(page); + goto next; +unlock: + unlock_page(page); +skip: + put_page(page); +next: + /* Huge page is mapped? No need to proceed. */ + if (pmd_trans_huge(*vmf->pmd)) + break; + } + rcu_read_unlock(); +} +EXPORT_SYMBOL(filemap_map_pages); + +vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) +{ + struct page *page = vmf->page; + struct inode *inode = file_inode(vmf->vma->vm_file); + vm_fault_t ret = VM_FAULT_LOCKED; + + sb_start_pagefault(inode->i_sb); + file_update_time(vmf->vma->vm_file); + lock_page(page); + if (page->mapping != inode->i_mapping) { + unlock_page(page); + ret = VM_FAULT_NOPAGE; + goto out; + } + /* + * We mark the page dirty already here so that when freeze is in + * progress, we are guaranteed that writeback during freezing will + * see the dirty page and writeprotect it again. 
+ */ + set_page_dirty(page); + wait_for_stable_page(page); +out: + sb_end_pagefault(inode->i_sb); + return ret; +} + +const struct vm_operations_struct generic_file_vm_ops = { + .fault = filemap_fault, + .map_pages = filemap_map_pages, + .page_mkwrite = filemap_page_mkwrite, +}; + +/* This is used for a general mmap of a disk file */ + +int generic_file_mmap(struct file * file, struct vm_area_struct * vma) +{ + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) + return -ENOEXEC; + file_accessed(file); + vma->vm_ops = &generic_file_vm_ops; + return 0; +} + +/* + * This is for filesystems which do not implement ->writepage. + */ +int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) +{ + if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) + return -EINVAL; + return generic_file_mmap(file, vma); +} +#else +vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} +int generic_file_mmap(struct file * file, struct vm_area_struct * vma) +{ + return -ENOSYS; +} +int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma) +{ + return -ENOSYS; +} +#endif /* CONFIG_MMU */ + +EXPORT_SYMBOL(filemap_page_mkwrite); +EXPORT_SYMBOL(generic_file_mmap); +EXPORT_SYMBOL(generic_file_readonly_mmap); + +static struct page *wait_on_page_read(struct page *page) +{ + if (!IS_ERR(page)) { + wait_on_page_locked(page); + if (!PageUptodate(page)) { + put_page(page); + page = ERR_PTR(-EIO); + } + } + return page; +} + +static struct page *do_read_cache_page(struct address_space *mapping, + pgoff_t index, + int (*filler)(void *, struct page *), + void *data, + gfp_t gfp) +{ + struct page *page; + int err; +repeat: + page = find_get_page(mapping, index); + if (!page) { + page = __page_cache_alloc(gfp); + if (!page) + return ERR_PTR(-ENOMEM); + err = add_to_page_cache_lru(page, mapping, index, gfp); + if (unlikely(err)) { + put_page(page); + if (err == -EEXIST) + goto repeat; + 
/* Presumably ENOMEM for xarray node */ + return ERR_PTR(err); + } + +filler: + if (filler) + err = filler(data, page); + else + err = mapping->a_ops->readpage(data, page); + + if (err < 0) { + put_page(page); + return ERR_PTR(err); + } + + page = wait_on_page_read(page); + if (IS_ERR(page)) + return page; + goto out; + } + if (PageUptodate(page)) + goto out; + + /* + * Page is not up to date and may be locked due one of the following + * case a: Page is being filled and the page lock is held + * case b: Read/write error clearing the page uptodate status + * case c: Truncation in progress (page locked) + * case d: Reclaim in progress + * + * Case a, the page will be up to date when the page is unlocked. + * There is no need to serialise on the page lock here as the page + * is pinned so the lock gives no additional protection. Even if the + * the page is truncated, the data is still valid if PageUptodate as + * it's a race vs truncate race. + * Case b, the page will not be up to date + * Case c, the page may be truncated but in itself, the data may still + * be valid after IO completes as it's a read vs truncate race. The + * operation must restart if the page is not uptodate on unlock but + * otherwise serialising on page lock to stabilise the mapping gives + * no additional guarantees to the caller as the page lock is + * released before return. + * Case d, similar to truncation. If reclaim holds the page lock, it + * will be a race with remove_mapping that determines if the mapping + * is valid on unlock but otherwise the data is valid and there is + * no need to serialise with page lock. + * + * As the page lock gives no additional guarantee, we optimistically + * wait on the page to be unlocked and check if it's up to date and + * use the page if it is. Otherwise, the page lock is required to + * distinguish between the different cases. 
The motivation is that we + * avoid spurious serialisations and wakeups when multiple processes + * wait on the same page for IO to complete. + */ + wait_on_page_locked(page); + if (PageUptodate(page)) + goto out; + + /* Distinguish between all the cases under the safety of the lock */ + lock_page(page); + + /* Case c or d, restart the operation */ + if (!page->mapping) { + unlock_page(page); + put_page(page); + goto repeat; + } + + /* Someone else locked and filled the page in a very small window */ + if (PageUptodate(page)) { + unlock_page(page); + goto out; + } + + /* + * A previous I/O error may have been due to temporary + * failures. + * Clear page error before actual read, PG_error will be + * set again if read page fails. + */ + ClearPageError(page); + goto filler; + +out: + mark_page_accessed(page); + return page; +} + +/** + * read_cache_page - read into page cache, fill it if needed + * @mapping: the page's address_space + * @index: the page index + * @filler: function to perform the read + * @data: first arg to filler(data, page) function, often left as NULL + * + * Read into the page cache. If a page already exists, and PageUptodate() is + * not set, try to fill the page and wait for it to become unlocked. + * + * If the page does not get brought uptodate, return -EIO. + * + * Return: up to date page on success, ERR_PTR() on failure. + */ +struct page *read_cache_page(struct address_space *mapping, + pgoff_t index, + int (*filler)(void *, struct page *), + void *data) +{ + return do_read_cache_page(mapping, index, filler, data, + mapping_gfp_mask(mapping)); +} +EXPORT_SYMBOL(read_cache_page); + +/** + * read_cache_page_gfp - read into page cache, using specified page allocation flags. 
+ * @mapping: the page's address_space + * @index: the page index + * @gfp: the page allocator flags to use if allocating + * + * This is the same as "read_mapping_page(mapping, index, NULL)", but with + * any new page allocations done using the specified allocation flags. + * + * If the page does not get brought uptodate, return -EIO. + * + * Return: up to date page on success, ERR_PTR() on failure. + */ +struct page *read_cache_page_gfp(struct address_space *mapping, + pgoff_t index, + gfp_t gfp) +{ + return do_read_cache_page(mapping, index, NULL, NULL, gfp); +} +EXPORT_SYMBOL(read_cache_page_gfp); + +/* + * Don't operate on ranges the page cache doesn't support, and don't exceed the + * LFS limits. If pos is under the limit it becomes a short access. If it + * exceeds the limit we return -EFBIG. + */ +static int generic_write_check_limits(struct file *file, loff_t pos, + loff_t *count) +{ + struct inode *inode = file->f_mapping->host; + loff_t max_size = inode->i_sb->s_maxbytes; + loff_t limit = rlimit(RLIMIT_FSIZE); + + if (limit != RLIM_INFINITY) { + if (pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; + } + *count = min(*count, limit - pos); + } + + if (!(file->f_flags & O_LARGEFILE)) + max_size = MAX_NON_LFS; + + if (unlikely(pos >= max_size)) + return -EFBIG; + + *count = min(*count, max_size - pos); + + return 0; +} + +/* + * Performs necessary checks before doing a write + * + * Can adjust writing position or amount of bytes to write. + * Returns appropriate error code that caller should return or + * zero in case that write should be allowed. 
+ */ +inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + loff_t count; + int ret; + + if (IS_SWAPFILE(inode)) + return -ETXTBSY; + + if (!iov_iter_count(from)) + return 0; + + /* FIXME: this is for backwards compatibility with 2.4 */ + if (iocb->ki_flags & IOCB_APPEND) + iocb->ki_pos = i_size_read(inode); + + if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) + return -EINVAL; + + count = iov_iter_count(from); + ret = generic_write_check_limits(file, iocb->ki_pos, &count); + if (ret) + return ret; + + iov_iter_truncate(from, count); + return iov_iter_count(from); +} +EXPORT_SYMBOL(generic_write_checks); + +/* + * Performs necessary checks before doing a clone. + * + * Can adjust amount of bytes to clone via @req_count argument. + * Returns appropriate error code that caller should return or + * zero in case the clone should be allowed. + */ +int generic_remap_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *req_count, unsigned int remap_flags) +{ + struct inode *inode_in = file_in->f_mapping->host; + struct inode *inode_out = file_out->f_mapping->host; + uint64_t count = *req_count; + uint64_t bcount; + loff_t size_in, size_out; + loff_t bs = inode_out->i_sb->s_blocksize; + int ret; + + /* The start of both ranges must be aligned to an fs block. */ + if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) + return -EINVAL; + + /* Ensure offsets don't wrap. */ + if (pos_in + count < pos_in || pos_out + count < pos_out) + return -EINVAL; + + size_in = i_size_read(inode_in); + size_out = i_size_read(inode_out); + + /* Dedupe requires both ranges to be within EOF. */ + if ((remap_flags & REMAP_FILE_DEDUP) && + (pos_in >= size_in || pos_in + count > size_in || + pos_out >= size_out || pos_out + count > size_out)) + return -EINVAL; + + /* Ensure the infile range is within the infile. 
*/ + if (pos_in >= size_in) + return -EINVAL; + count = min(count, size_in - (uint64_t)pos_in); + + ret = generic_write_check_limits(file_out, pos_out, &count); + if (ret) + return ret; + + /* + * If the user wanted us to link to the infile's EOF, round up to the + * next block boundary for this check. + * + * Otherwise, make sure the count is also block-aligned, having + * already confirmed the starting offsets' block alignment. + */ + if (pos_in + count == size_in) { + bcount = ALIGN(size_in, bs) - pos_in; + } else { + if (!IS_ALIGNED(count, bs)) + count = ALIGN_DOWN(count, bs); + bcount = count; + } + + /* Don't allow overlapped cloning within the same file. */ + if (inode_in == inode_out && + pos_out + bcount > pos_in && + pos_out < pos_in + bcount) + return -EINVAL; + + /* + * We shortened the request but the caller can't deal with that, so + * bounce the request back to userspace. + */ + if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) + return -EINVAL; + + *req_count = count; + return 0; +} + + +/* + * Performs common checks before doing a file copy/clone + * from @file_in to @file_out. + */ +int generic_file_rw_checks(struct file *file_in, struct file *file_out) +{ + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); + + /* Don't copy dirs, pipes, sockets... */ + if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) + return -EISDIR; + if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) + return -EINVAL; + + if (!(file_in->f_mode & FMODE_READ) || + !(file_out->f_mode & FMODE_WRITE) || + (file_out->f_flags & O_APPEND)) + return -EBADF; + + return 0; +} + +/* + * Performs necessary checks before doing a file copy + * + * Can adjust amount of bytes to copy via @req_count argument. + * Returns appropriate error code that caller should return or + * zero in case the copy should be allowed. 
+ */ +int generic_copy_file_checks(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + size_t *req_count, unsigned int flags) +{ + struct inode *inode_in = file_inode(file_in); + struct inode *inode_out = file_inode(file_out); + uint64_t count = *req_count; + loff_t size_in; + int ret; + + ret = generic_file_rw_checks(file_in, file_out); + if (ret) + return ret; + + /* Don't touch certain kinds of inodes */ + if (IS_IMMUTABLE(inode_out)) + return -EPERM; + + if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out)) + return -ETXTBSY; + + /* Ensure offsets don't wrap. */ + if (pos_in + count < pos_in || pos_out + count < pos_out) + return -EOVERFLOW; + + /* Shorten the copy to EOF */ + size_in = i_size_read(inode_in); + if (pos_in >= size_in) + count = 0; + else + count = min(count, size_in - (uint64_t)pos_in); + + ret = generic_write_check_limits(file_out, pos_out, &count); + if (ret) + return ret; + + /* Don't allow overlapped copying within the same file. */ + if (inode_in == inode_out && + pos_out + count > pos_in && + pos_out < pos_in + count) + return -EINVAL; + + *req_count = count; + return 0; +} + +int pagecache_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + const struct address_space_operations *aops = mapping->a_ops; + + return aops->write_begin(file, mapping, pos, len, flags, + pagep, fsdata); +} +EXPORT_SYMBOL(pagecache_write_begin); + +int pagecache_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + const struct address_space_operations *aops = mapping->a_ops; + + return aops->write_end(file, mapping, pos, len, copied, page, fsdata); +} +EXPORT_SYMBOL(pagecache_write_end); + +ssize_t +generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = 
file->f_mapping; + struct inode *inode = mapping->host; + loff_t pos = iocb->ki_pos; + ssize_t written; + size_t write_len; + pgoff_t end; + + write_len = iov_iter_count(from); + end = (pos + write_len - 1) >> PAGE_SHIFT; + + if (iocb->ki_flags & IOCB_NOWAIT) { + /* If there are pages to writeback, return */ + if (filemap_range_has_page(inode->i_mapping, pos, + pos + write_len - 1)) + return -EAGAIN; + } else { + written = filemap_write_and_wait_range(mapping, pos, + pos + write_len - 1); + if (written) + goto out; + } + + /* + * After a write we want buffered reads to be sure to go to disk to get + * the new data. We invalidate clean cached page from the region we're + * about to write. We do this *before* the write so that we can return + * without clobbering -EIOCBQUEUED from ->direct_IO(). + */ + written = invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + /* + * If a page can not be invalidated, return 0 to fall back + * to buffered write. + */ + if (written) { + if (written == -EBUSY) + return 0; + goto out; + } + + written = mapping->a_ops->direct_IO(iocb, from); + + /* + * Finally, try again to invalidate clean pages which might have been + * cached by non-direct readahead, or faulted in by get_user_pages() + * if the source of the write was an mmap'ed region of the file + * we're writing. Either one is a pretty crazy thing to do, + * so we don't support it 100%. If this invalidation + * fails, tough, the write still worked... + * + * Most of the time we do not need this since dio_complete() will do + * the invalidation for us. 
However there are some file systems that + * do not end up with dio_complete() being called, so let's not break + * them by removing it completely + */ + if (mapping->nrpages) + invalidate_inode_pages2_range(mapping, + pos >> PAGE_SHIFT, end); + + if (written > 0) { + pos += written; + write_len -= written; + if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { + i_size_write(inode, pos); + mark_inode_dirty(inode); + } + iocb->ki_pos = pos; + } + iov_iter_revert(from, write_len - iov_iter_count(from)); +out: + return written; +} +EXPORT_SYMBOL(generic_file_direct_write); + +/* + * Find or create a page at the given pagecache position. Return the locked + * page. This function is specifically for buffered writes. + */ +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags) +{ + struct page *page; + int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; + + if (flags & AOP_FLAG_NOFS) + fgp_flags |= FGP_NOFS; + + page = pagecache_get_page(mapping, index, fgp_flags, + mapping_gfp_mask(mapping)); + if (page) + wait_for_stable_page(page); + + return page; +} +EXPORT_SYMBOL(grab_cache_page_write_begin); +#endif ssize_t pxt4_generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) @@ -198,6 +3350,7 @@ ssize_t pxt4_generic_perform_write(struct file *file, return written ? 
written : status; } +//EXPORT_SYMBOL(generic_perform_write); /** * __generic_file_write_iter - write data to a file @@ -220,7 +3373,7 @@ ssize_t pxt4_generic_perform_write(struct file *file, * * number of bytes written, even for truncated writes * * negative error code if no data has been written at all */ -ssize_t pxt4___generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +ssize_t __pxt4_generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; @@ -293,3 +3446,69 @@ ssize_t pxt4___generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from current->backing_dev_info = NULL; return written ? written : err; } +//EXPORT_SYMBOL(__generic_file_write_iter); + +/** + * generic_file_write_iter - write data to a file + * @iocb: IO state structure + * @from: iov_iter with data to write + * + * This is a wrapper around __generic_file_write_iter() to be used by most + * filesystems. It takes care of syncing the file in case of O_SYNC file + * and acquires i_mutex as needed. 
+ * Return: + * * negative error code if no data has been written at all of + * vfs_fsync_range() failed for a synchronous write + * * number of bytes written, even for truncated writes + */ +#if 0 +ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct inode *inode = file->f_mapping->host; + ssize_t ret; + + inode_lock(inode); + ret = generic_write_checks(iocb, from); + if (ret > 0) + ret = __generic_file_write_iter(iocb, from); + inode_unlock(inode); + + if (ret > 0) + ret = generic_write_sync(iocb, ret); + return ret; +} +EXPORT_SYMBOL(generic_file_write_iter); + +/** + * try_to_release_page() - release old fs-specific metadata on a page + * + * @page: the page which the kernel is trying to free + * @gfp_mask: memory allocation flags (and I/O mode) + * + * The address_space is to try to release any data against the page + * (presumably at page->private). + * + * This may also be called if PG_fscache is set on a page, indicating that the + * page is known to the local caching routines. + * + * The @gfp_mask argument specifies whether I/O may be performed to release + * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS). + * + * Return: %1 if the release was successful, otherwise return zero. 
+ */ +int try_to_release_page(struct page *page, gfp_t gfp_mask) +{ + struct address_space * const mapping = page->mapping; + + BUG_ON(!PageLocked(page)); + if (PageWriteback(page)) + return 0; + + if (mapping && mapping->a_ops->releasepage) + return mapping->a_ops->releasepage(page, gfp_mask); + return try_to_free_buffers(page); +} + +EXPORT_SYMBOL(try_to_release_page); +#endif From 5f090a040d6b25b607f82b7415c0457b5a8a90f7 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 18:07:28 +0900 Subject: [PATCH 18/37] Feat: editing file.c --- pxt4/file.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pxt4/file.c b/pxt4/file.c index 0a3dc1364..b297c4f15 100644 --- a/pxt4/file.c +++ b/pxt4/file.c @@ -268,7 +268,7 @@ pxt4_file_write_iter_internal(struct kiocb *iocb, struct iov_iter *from) } } - ret = __generic_file_write_iter(iocb, from); + ret = __pxt4_generic_file_write_iter(iocb, from); /* * Unaligned direct AIO must be the only IO in flight. 
Otherwise * overlapping aligned IO after unaligned might result in data @@ -307,13 +307,15 @@ static void print_cpu_dm(unsigned long id, const char * name, unsigned long long DEFINE_DS_MONITORING(cpu_dm, get_cpu_id, get_cpu_name, print_cpu_dm); unsigned long long file_write_iter_time, file_write_iter_count; +ssize_t __pxt4_generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from); static ssize_t pxt4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; struct timespec myclock[2]; getrawmonotonic(&myclock[0]); - ret = pxt4_file_write_iter_internal(iocb, from); + //ret = pxt4_file_write_iter_internal(iocb, from); + ret = __pxt4_generic_file_write_iter(iocb, from); getrawmonotonic(&myclock[1]); calclock(myclock, &file_write_iter_time, &file_write_iter_count); //printk("cpu[%d] called pxt4_file_write_iter()",current->cpu); From d268cd27c946c0fd38df768db6144bb1715e85b9 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 7 Dec 2023 18:08:13 +0900 Subject: [PATCH 19/37] Feat: editing filemap.c --- pxt4/mm/filemap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c index 5e1079eba..ce635b49c 100644 --- a/pxt4/mm/filemap.c +++ b/pxt4/mm/filemap.c @@ -3438,7 +3438,7 @@ ssize_t __pxt4_generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from */ } } else { - written = generic_perform_write(file, from, iocb->ki_pos); + written = pxt4_generic_perform_write(file, from, iocb->ki_pos); if (likely(written > 0)) iocb->ki_pos += written; } From 0946f8b99a23673d9bb1e589907895760c241ab2 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Thu, 4 Jan 2024 23:57:48 +0900 Subject: [PATCH 20/37] WIP: before fixing git error --- assignment10/test_and_set_module/Makefile | 6 +----- pxt4/Makefile | 2 +- pxt4/file.c | 8 +++++--- pxt4/mm/.filemap.c.swo | Bin 0 -> 24576 bytes pxt4/mm/filemap.c | 11 +++++++++++ pxt4/super.c | 7 +++++-- 6 files changed, 23 
insertions(+), 11 deletions(-) create mode 100644 pxt4/mm/.filemap.c.swo diff --git a/assignment10/test_and_set_module/Makefile b/assignment10/test_and_set_module/Makefile index c83215132..b43664b04 100644 --- a/assignment10/test_and_set_module/Makefile +++ b/assignment10/test_and_set_module/Makefile @@ -1,8 +1,4 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o -spinlock_module_final-y := spinlock_module.o calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +obj-m := test_and_set_module.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) diff --git a/pxt4/Makefile b/pxt4/Makefile index d83d09247..6ecf354bb 100644 --- a/pxt4/Makefile +++ b/pxt4/Makefile @@ -10,7 +10,7 @@ pxt4-y := balloc.o bitmap.o block_validity.o dir.o pxt4_jbd3.o extents.o \ indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \ mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \ super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o \ - calclock.o ds_monitoring.o + calclock.o ds_monitoring.o mm/filemap.o pxt4-m += acl.o pxt4-m += xattr_security.o pxt4-m += verity.o diff --git a/pxt4/file.c b/pxt4/file.c index b297c4f15..3834342c6 100644 --- a/pxt4/file.c +++ b/pxt4/file.c @@ -217,6 +217,8 @@ pxt4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) } #endif +ssize_t __pxt4_generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from); + static ssize_t pxt4_file_write_iter_internal(struct kiocb *iocb, struct iov_iter *from) { @@ -307,16 +309,16 @@ static void print_cpu_dm(unsigned long id, const char * name, unsigned long long DEFINE_DS_MONITORING(cpu_dm, get_cpu_id, get_cpu_name, print_cpu_dm); unsigned long long file_write_iter_time, file_write_iter_count; -ssize_t __pxt4_generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from); static ssize_t 
pxt4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { ssize_t ret; struct timespec myclock[2]; getrawmonotonic(&myclock[0]); - //ret = pxt4_file_write_iter_internal(iocb, from); - ret = __pxt4_generic_file_write_iter(iocb, from); + ret = pxt4_file_write_iter_internal(iocb, from); + //ret = pxt4_generic_file_write_iter_internal(iocb, from); getrawmonotonic(&myclock[1]); + calclock(myclock, &file_write_iter_time, &file_write_iter_count); //printk("cpu[%d] called pxt4_file_write_iter()",current->cpu); find_ds_monitoring(&cpu_dm, current); diff --git a/pxt4/mm/.filemap.c.swo b/pxt4/mm/.filemap.c.swo new file mode 100644 index 0000000000000000000000000000000000000000..fd40986ca309270a9307a1eaf047a03df3187d29 GIT binary patch literal 24576 zcmeI4eT*FES-^)RwAmyLZK0(?5WR6qe0RRxv*QHCA9wb}K0DXiKI5|!7ZpacGqbmo zo1NLr%-;Fzd{kA70wF0uO_c%?9}O)LDgRWEmXKCapeg+bLITm4szMC{1+@k7(GsXC ze$V^P?C$OD`R-gmfM%ti_jcy}eBS4MXP)PMUe|ZtHG5p$UfX5yd81|h#{2K}@3nr! z+I_8M-4nEeNUm@2*=)K8bi#=+h&tyc+UK%cCt9tEMiBZfr(JV5sHD7IN}!a$}36v5j zB~VJBlt3wgQUav}N(qz_C?!xzpp?LWg9IE;Z~I$s6bAtN|Aqbkmp@=xe-599XW8J>kF;bG`N24~?X;5N7gzIlyh{XKjfo`w&>WVGOpzcdxdr=iyQ4Kms1rUf$JOMV`1h3*?c^XpK58q)t zUxUAcC*jxO*C2oz$audM-U9ufO3+XvK@@ww9Ykt!QklQwPNr_Y*{Y1G!*R4dQ%)Gh z%PL;-lhA3meNVYz90k#WS`M-%8MCIZQm5rB3Gb+>u?Z_pgY&+fDT({A7yQUi0@rT) zNh3~L_Hq(r{z#f79XHci)R<%(w=Bu3#*|Xksmw_i!ywJY~=Qja{2{^X=ViUXhKYO;s+T&1W1Kn=NEq z@SF@eB%NYd+@>!w>5XiV#-Sc}(n!HQA+^_JZlF@}vQCENog^2o2C6LOINm*-G&7M zdTJHIP3yTBDkF__qLEhhvQc;3#hPoEnq~aJgoC{HE?N@sLoJ-dO);4`rdO5CQ z9x?rk@W4W0+l|EcWwvj$g0I^fJ&Ib|#Q+~e-b^izXIHea*P0mXKA+}~pR8t0(IcLP z(m+{+kDWTBcJ0}-XIxE39{1FA;>t564Ag$+7FQ0Yoz$;+{!}g%$ywD^j;mXC)pp)i zGx=wug;*Vr!^lb0eu6R=;)99rHJz;H#x2RrLo*7}CY5jRL_uSPB-`~vSW46r**vwB zE?i$&)ntU3H(|cX)b~&lJ%p)>c+M}!$zo0Ix~)c>=$<{dDRl<}9@p|5Su^e|H075X z%pA1ZM0cXRl(>6mZPzZze``(6X4|D?bh;Cwic&r;Rx5rsuBCcY5HaMQkDSsLQKC*% zYfkZ^)4gnBNKK#YCSvElnm`OJeEAxw$3K5oo3{x4CUIo;?D3hs$~-LPJw`3k#nAVV 
zQ{2!p%S9DBD}JIHN!-#`-IyiuuHysk%M!=+C;TP)$dZ)DveMR@Jf9)))uB@})2C1`+@9W350MR6o6V{sB*7PoMT8&itJ=pDb8VtMa`LviNCmVKC-U^0Y zPn#?P_V9Y#(07>EEH}e)%6ml|Wl2oq*S4|QS@5&4KJ1p;M(Kw(GWFdKZ-Ohsa@E5{ z&tDpLM^t>tcZc1QX9J4GB04l@@A)yT2Ts}?R;HgMaZ(MUdw9bPVy&2i1JnuPVLA1? z@Q|AF^3~+o=cPI^)qX54T|%Q#8h# zNP`8f*lTrTxVCxDxnYH|z_poH$*_C=xptiBw&!Mom>nmM%xe=OT}d;)rO-7`WwG*N zW^s@zFOau)rfaw|u9pk{oFl80eoVFEG*jCmOcE#D)>lfZM(S8VD{+T%#QvFRI!pe9 zluP-#;Jvc`ot7G8m{pSnKIDa0T9m&um>cpqRlJ2bQ5LwVXgC_UHcy+x}3<8}0}VI|`$24a z3%)2eIy?{0zjgiXPqQcVgY4-qz-id+N&^d_E{Ay zF?ma_PUSX~jSYpztP!h8b>hsiWBV-j1om1L%Boa?*u|B-h$}Bn)sD%cFpAOp31P74 zhbtpRWm9VB=qR^}-;>1~^1yz8G@N&I4DfN)Ujsd;+^_tQH}4A+qgx;b01e?CT;8~& zPOFaXjy+#r$#@H2ZU$~sE%V;b`+tJrrd@}0JkN7C*Y$h7_ySZDS{3f9LFx)6!i1Q6imHx5nj85b-`A;H}21 zmpjhHB1}i}Upi4CD}*cdS|``?GVb&bpk`h zUUq`acuh74Svi`Q!mv0>eD%#Fj@TkFjUuy_doIY$ zOl_GJ>@)T)A*dVWtQs~MrDAj$9d+M*>c${uMPfy>-MABFBO=#PxiLF$AD%vIb}{ur z(;Y<76ysmjPTCX^vdX#m0!H)U5~pO^VONLljT9psWDJxyt|*mtg7rjF84F)VFk3Mq zYsOgotDFi9#t{Ze^2uVxO-C8;BE?I07#j4-WM6$FCGWgwmd>bs88|O;Cx*B=b#@#K zqUuaD#!H6P6q;tS-jz+8_(xgac58akvUDV*CFdJP99xCqVZ6%TFnRQUav} zN(qz_C?!xzpp?M>b_r;6x%{V;!2gH@(yWq)o;n^;IYeruZK68VSnhzqIu#?VI@Kr~ zH2bUzVxV4j#-hIBdyDlJ*^2MYWS6vCNiP9hc{ht}*xSXQo2ii!Hj})PZYI@g>9cx?D-y~Yi|U}<-Nb|G~AHM1w~n#N_RU1EJ{Z_E=D+pKOJAtnf~SR-^6QgyRB zHFJFWq`UI{#oU1b@#Gjzu37Fh<+zzOTnB7$0@$nDlX0>eVCt8VcE07XD zmNvx+d&c+GpPr}!`R!tp~+{(vs{VF!rf&bFH-*ZpU$nT{=@y5CH;)iML2Kpt^9@@CxMfs~3)l2FT zGqRUU@BQhC+q<@vlH}CRCg-+3@zblNSF=6{gcA4eZMGjlFUzpvE>qA*S}#u=Rk}p^ zn{MMe5*k|c(2Kcl^zQZk>h^j_Yld9XLr^SaLrO7cvdnst-@beqZgow${0cYd$)%n{ z8)YpTbs1vVsA&Ck97rTT2QE#jH5VkDbfeK-)#;%nbeEh>6-A;=$v`LDTvC1Hue*3` zJ z86)0c!O1vlG_c1W5kc-`4!gdos{695o=46Qj)3hZ!QbQ^3_CGt2&0ft0dqFRoRrFU zecDIA*a=(7$Y&=nb|Cl{78#FN{j1+%qLF0j;%~! 
z2L2cw!oV3JMI+fjmx3^omxyhbfQXd;rap9%Tz+hk8`DG0&dFV36f#0woWvX0dDjhS z#heESIJ+kOmnoa-P!%!9Q^oON4k!~2P*1S0InBccygrR5y&(N^(?_`M#LL+!?IMzs zO-77nk39`srH`Gm^KWK&s^-tk9`2ng5)~g;X3fyrFl0!iEEz>PCZ*$H5R51jgWBO9 i=cmxK?f@Y`Czr)R(e1rNXPGz&<5SS1NFvGPTmK2GGT9jb literal 0 HcmV?d00001 diff --git a/pxt4/mm/filemap.c b/pxt4/mm/filemap.c index ce635b49c..785b46db7 100644 --- a/pxt4/mm/filemap.c +++ b/pxt4/mm/filemap.c @@ -3268,6 +3268,10 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping, EXPORT_SYMBOL(grab_cache_page_write_begin); #endif +#include "../calclock.h" + +unsigned long long dirty_pages_time, dirty_pages_count; + ssize_t pxt4_generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) { @@ -3345,7 +3349,14 @@ ssize_t pxt4_generic_perform_write(struct file *file, pos += copied; written += copied; + struct timespec myclock[2]; + getrawmonotonic(&myclock[0]); + balance_dirty_pages_ratelimited(mapping); + + getrawmonotonic(&myclock[1]); + calclock(myclock, &dirty_pages_time, &dirty_pages_count); + } while (iov_iter_count(i)); return written ? 
written : status; diff --git a/pxt4/super.c b/pxt4/super.c index ed1395924..9808be6cf 100644 --- a/pxt4/super.c +++ b/pxt4/super.c @@ -6338,6 +6338,8 @@ static int __init pxt4_init_fs(void) DECLARE_DS_MONITORING(cpu_dm); extern unsigned long long file_write_iter_time, file_write_iter_count; +extern unsigned long long dirty_pages_time, dirty_pages_count; + static void __exit pxt4_exit_fs(void) { pxt4_destroy_lazyinit_thread(); @@ -6353,8 +6355,9 @@ static void __exit pxt4_exit_fs(void) pxt4_exit_es(); pxt4_exit_pending(); - printk("pxt4_file_write_iter is called %llu times and the time interval is %lluns\n", file_write_iter_count, file_write_iter_time); - print_ds_monitoring(&cpu_dm); + printk("file_write_iter is called %llu times and the time interval is %lluns\n", file_write_iter_count, file_write_iter_time); + //print_ds_monitoring(&cpu_dm); + printk("balance_dirty_pages_ratelimited is called %llu times, and the time interval is %lluns\n", dirty_pages_time, dirty_pages_count); delete_ds_monitoring(&cpu_dm); } From bb81d7399fa3620e30d51dacec4efc6177316593 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 00:33:31 +0900 Subject: [PATCH 21/37] Feat: deleting redundant codes of Makefile in fetch_and_add_module --- assignment10/fetch_and_add_module/Makefile | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/assignment10/fetch_and_add_module/Makefile b/assignment10/fetch_and_add_module/Makefile index c83215132..d0bc79fbd 100644 --- a/assignment10/fetch_and_add_module/Makefile +++ b/assignment10/fetch_and_add_module/Makefile @@ -1,8 +1,4 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o -spinlock_module_final-y := spinlock_module.o calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +obj-m := fetch_and_add_module.o KERNEL_DIR := 
/lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From 3df3a6207aaa9a699c9bd59adfd3fc5e8fc9b081 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 00:35:46 +0900 Subject: [PATCH 22/37] Feat: deleting redundant codes of Makefile in compare_and_swap_module --- assignment10/compare_and_swap_module/Makefile | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/assignment10/compare_and_swap_module/Makefile b/assignment10/compare_and_swap_module/Makefile index c83215132..d0dce605d 100644 --- a/assignment10/compare_and_swap_module/Makefile +++ b/assignment10/compare_and_swap_module/Makefile @@ -1,8 +1,4 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o -spinlock_module_final-y := spinlock_module.o calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +obj-m := compare_and_swap_module.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From 820fe5ccb45e5761a6484255df7257f57160e6b8 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:10:54 +0900 Subject: [PATCH 23/37] Feat: deleting redundant codes of Makefile in spinlock module --- assignment10/spinlock_module/Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/assignment10/spinlock_module/Makefile b/assignment10/spinlock_module/Makefile index f88bd41ae..546c4479e 100644 --- a/assignment10/spinlock_module/Makefile +++ b/assignment10/spinlock_module/Makefile @@ -1,7 +1,5 @@ obj-m := spinlock_module_final.o -spinlock_module_final-y := spinlock_module.o ../calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +spinlock_module_final-y := spinlock_module.o linked_list_impl.o ../calclock.o KERNEL_DIR := /lib/modules/$(shell uname 
-r)/build PWD := $(shell pwd) From c43fd79087e9665a3d4740c1081861807f960afa Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:14:08 +0900 Subject: [PATCH 24/37] Feat: separating codes related to linked_list implementation from spinlock_module.c by adding new file named linked_list_impl.c --- assignment10/linked_list_impl.h | 17 ++++ .../spinlock_module/linked_list_impl.c | 77 +++++++++++++++++++ .../spinlock_module/spinlock_module.c | 73 +----------------- 3 files changed, 98 insertions(+), 69 deletions(-) create mode 100644 assignment10/linked_list_impl.h create mode 100644 assignment10/spinlock_module/linked_list_impl.c diff --git a/assignment10/linked_list_impl.h b/assignment10/linked_list_impl.h new file mode 100644 index 000000000..659228d4e --- /dev/null +++ b/assignment10/linked_list_impl.h @@ -0,0 +1,17 @@ +#ifndef _LINKED_LIST_IMPL_H +#define _LINKED_LIST_IMPL_H + + +struct my_node { + struct list_head list; + unsigned int data; +}; + +struct list_head my_list; + +void *add_to_list(int thread_id, int range_bound[]); +int search_list(int thread_id, void *data, int range_bound[]); +int delete_from_list(int thread_id, int range_bound[]); + + +#endif diff --git a/assignment10/spinlock_module/linked_list_impl.c b/assignment10/spinlock_module/linked_list_impl.c new file mode 100644 index 000000000..e59c55197 --- /dev/null +++ b/assignment10/spinlock_module/linked_list_impl.c @@ -0,0 +1,77 @@ +#include "../calclock.h" +#include "../linked_list_impl.h" +#include +#include + +spinlock_t my_lock; +DEFINE_SPINLOCK(my_lock); + +//struct my_node { +// struct list_head list; +// unsigned int data; +//}; + +//struct list_head my_list; + +//unsigned long long count_insert, count_search, count_delete; +//unsigned long long time_insert, time_search, time_delete; + +void *add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct my_node *first = NULL; + + int i; + for (i = range_bound[0]; i <= 
range_bound[1]; i++) { + struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); + new->data = i; + + spin_lock(&my_lock); + getrawmonotonic(&localclock[0]); + + list_add(&new->list, &my_list); + + getrawmonotonic(&localclock[1]); + + calclock(localclock, &time_insert, &count_insert); + spin_unlock(&my_lock); + if (first == NULL) first = new; + } + + printk(KERN_INFO "thread #%d range: %d ~ %d\n", thread_id, range_bound[0], range_bound[1]); + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct timespec localclock[2]; + struct my_node *cur = (struct my_node *) data, *tmp; + spin_lock(&my_lock); + + list_for_each_entry_safe(cur, tmp, &my_list, list) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + }; + spin_unlock(&my_lock); + return 0; +} + +int delete_from_list(int thread_id, int range_bound[]) +{ + struct my_node *cur, *tmp; + struct timespec localclock[2]; + spin_lock(&my_lock); + + list_for_each_entry_safe(cur, tmp, &my_list, list) { + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); + + }; + spin_unlock(&my_lock); + return 0; +} + + + diff --git a/assignment10/spinlock_module/spinlock_module.c b/assignment10/spinlock_module/spinlock_module.c index e097283ca..26807cae2 100644 --- a/assignment10/spinlock_module/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -9,78 +9,13 @@ #include #include #include "../calclock.h" +#include "../linked_list_impl.h" #define NUM_THREADS 4 static struct task_struct *threads[NUM_THREADS]; -spinlock_t my_lock; - -struct my_node { - struct list_head list; - unsigned int data; -}; - -struct list_head my_list; - -unsigned long long count_insert, count_search, count_delete; -unsigned long long time_insert, time_search, time_delete; - - -void *add_to_list(int thread_id, int 
range_bound[]) { - struct timespec localclock[2]; - struct my_node *first = NULL; - - int i; - for (i = range_bound[0]; i <= range_bound[1]; i++) { - struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); - new->data = i; - - spin_lock(&my_lock); - getrawmonotonic(&localclock[0]); - - list_add(&new->list, &my_list); - - getrawmonotonic(&localclock[1]); - - calclock(localclock, &time_insert, &count_insert); - spin_unlock(&my_lock); - if (first == NULL) first = new; - } - - printk(KERN_INFO "thread #%d range: %d ~ %d\n", thread_id, range_bound[0], range_bound[1]); - return first; -} - -int search_list(int thread_id, void *data, int range_bound[]) { - struct timespec localclock[2]; - struct my_node *cur = (struct my_node *) data, *tmp; - spin_lock(&my_lock); - - list_for_each_entry_safe(cur, tmp, &my_list, list) { - getrawmonotonic(&localclock[0]); - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_search,&count_search); - }; - spin_unlock(&my_lock); - return 0; -} - -int delete_from_list(int thread_id, int range_bound[]) { - struct my_node *cur, *tmp; - struct timespec localclock[2]; - list_for_each_entry_safe(cur, tmp, &my_list, list) { - spin_lock(&my_lock); - getrawmonotonic(&localclock[0]); - list_del(&cur->list); - kfree(cur); - - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_delete, &count_delete); - spin_unlock(&my_lock); - - }; - return 0; -} +extern unsigned long long count_insert, count_search, count_delete; +extern unsigned long long time_insert, time_search, time_delete; void set_iter_range(int thread_id, int range_bound[2]) { range_bound[0] = thread_id * 250000; @@ -108,7 +43,7 @@ static int work_fn(void *data) int __init spinlock_module_init(void) { printk("Entering Spinlock Module!\n"); INIT_LIST_HEAD(&my_list); - spin_lock_init(&my_lock); + //spin_lock_init(&my_lock); int i; for (i = 0; i < NUM_THREADS; i++) { From 08a1418f516edb80ca79a9b1cc95e67b462ca3b2 Mon Sep 17 00:00:00 2001 From: jimin-kiim 
<0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:15:51 +0900 Subject: [PATCH 25/37] Fix: error occured using undeclared extern variables --- assignment10/spinlock_module/linked_list_impl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/assignment10/spinlock_module/linked_list_impl.c b/assignment10/spinlock_module/linked_list_impl.c index e59c55197..a7c6da5e1 100644 --- a/assignment10/spinlock_module/linked_list_impl.c +++ b/assignment10/spinlock_module/linked_list_impl.c @@ -13,8 +13,8 @@ DEFINE_SPINLOCK(my_lock); //struct list_head my_list; -//unsigned long long count_insert, count_search, count_delete; -//unsigned long long time_insert, time_search, time_delete; +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; void *add_to_list(int thread_id, int range_bound[]) { struct timespec localclock[2]; From f331236e5176c0fdf1b451f4cb074d405d354e75 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:21:57 +0900 Subject: [PATCH 26/37] Fix: errors occured by omitting headerfiles --- assignment10/linked_list_impl.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/assignment10/linked_list_impl.h b/assignment10/linked_list_impl.h index 659228d4e..73303519e 100644 --- a/assignment10/linked_list_impl.h +++ b/assignment10/linked_list_impl.h @@ -1,6 +1,8 @@ #ifndef _LINKED_LIST_IMPL_H #define _LINKED_LIST_IMPL_H - +#include +#include +#include struct my_node { struct list_head list; From 426c868a97714bd8921d606153c2e2782914c477 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:26:14 +0900 Subject: [PATCH 27/37] Fix: errors occured forbidding ISO C90; replacing codes of declaring variables --- assignment10/spinlock_module/spinlock_module.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/assignment10/spinlock_module/spinlock_module.c 
b/assignment10/spinlock_module/spinlock_module.c index 26807cae2..9c653cf7f 100644 --- a/assignment10/spinlock_module/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -24,11 +24,11 @@ void set_iter_range(int thread_id, int range_bound[2]) { static int work_fn(void *data) { + void *ret; int range_bound[2]; int thread_id = *(int*) data; set_iter_range(thread_id, range_bound); - - void *ret = add_to_list(thread_id, range_bound); + ret = add_to_list(thread_id, range_bound); search_list(thread_id, ret, range_bound); delete_from_list(thread_id, range_bound); @@ -41,11 +41,11 @@ static int work_fn(void *data) } int __init spinlock_module_init(void) { + int i; + printk("Entering Spinlock Module!\n"); INIT_LIST_HEAD(&my_list); - //spin_lock_init(&my_lock); - int i; for (i = 0; i < NUM_THREADS; i++) { int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); *arg = i; @@ -55,11 +55,12 @@ int __init spinlock_module_init(void) { } void __exit spinlock_module_cleanup(void) { + int i; + printk(KERN_INFO"%s: Spinlock linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); printk(KERN_INFO"%s: Spinlock linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); printk(KERN_INFO"%s: Spinlock linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); - int i; for(i = 0; i < NUM_THREADS; i++) { kthread_stop(threads[i]); printk("thread #%d stopped!", i + 1); From 03ee542ce1deeefb3a1d04d6800349753d0633db Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 04:32:25 +0900 Subject: [PATCH 28/37] Fix: errors related to initiating list --- assignment10/spinlock_module/linked_list_impl.c | 1 + assignment10/spinlock_module/spinlock_module.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/assignment10/spinlock_module/linked_list_impl.c b/assignment10/spinlock_module/linked_list_impl.c index a7c6da5e1..498d54dae 100644 --- 
a/assignment10/spinlock_module/linked_list_impl.c +++ b/assignment10/spinlock_module/linked_list_impl.c @@ -12,6 +12,7 @@ DEFINE_SPINLOCK(my_lock); //}; //struct list_head my_list; +LIST_HEAD(my_list); unsigned long long count_insert, count_search, count_delete; unsigned long long time_insert, time_search, time_delete; diff --git a/assignment10/spinlock_module/spinlock_module.c b/assignment10/spinlock_module/spinlock_module.c index 9c653cf7f..0cd296506 100644 --- a/assignment10/spinlock_module/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -44,7 +44,7 @@ int __init spinlock_module_init(void) { int i; printk("Entering Spinlock Module!\n"); - INIT_LIST_HEAD(&my_list); + //INIT_LIST_HEAD(&my_list); for (i = 0; i < NUM_THREADS; i++) { int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); From 0e2b19259f17eb968ced98440794ce87aae79027 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 05:00:29 +0900 Subject: [PATCH 29/37] Refactor: deleting redundant codes --- assignment10/linked_list_impl.h | 2 - .../spinlock_module/linked_list_impl.c | 83 +++++++++---------- .../spinlock_module/spinlock_module.c | 9 +- 3 files changed, 41 insertions(+), 53 deletions(-) diff --git a/assignment10/linked_list_impl.h b/assignment10/linked_list_impl.h index 73303519e..582a55b01 100644 --- a/assignment10/linked_list_impl.h +++ b/assignment10/linked_list_impl.h @@ -9,8 +9,6 @@ struct my_node { unsigned int data; }; -struct list_head my_list; - void *add_to_list(int thread_id, int range_bound[]); int search_list(int thread_id, void *data, int range_bound[]); int delete_from_list(int thread_id, int range_bound[]); diff --git a/assignment10/spinlock_module/linked_list_impl.c b/assignment10/spinlock_module/linked_list_impl.c index 498d54dae..a4f20587c 100644 --- a/assignment10/spinlock_module/linked_list_impl.c +++ b/assignment10/spinlock_module/linked_list_impl.c @@ -6,72 +6,65 @@ spinlock_t my_lock; DEFINE_SPINLOCK(my_lock); -//struct 
my_node { -// struct list_head list; -// unsigned int data; -//}; - -//struct list_head my_list; LIST_HEAD(my_list); unsigned long long count_insert, count_search, count_delete; unsigned long long time_insert, time_search, time_delete; void *add_to_list(int thread_id, int range_bound[]) { - struct timespec localclock[2]; - struct my_node *first = NULL; + struct timespec localclock[2]; + struct my_node *first = NULL; - int i; - for (i = range_bound[0]; i <= range_bound[1]; i++) { - struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); - new->data = i; + int i; + for (i = range_bound[0]; i <= range_bound[1]; i++) { + struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); + new->data = i; - spin_lock(&my_lock); - getrawmonotonic(&localclock[0]); + spin_lock(&my_lock); + getrawmonotonic(&localclock[0]); - list_add(&new->list, &my_list); + list_add(&new->list, &my_list); - getrawmonotonic(&localclock[1]); + getrawmonotonic(&localclock[1]); - calclock(localclock, &time_insert, &count_insert); - spin_unlock(&my_lock); - if (first == NULL) first = new; - } + calclock(localclock, &time_insert, &count_insert); + spin_unlock(&my_lock); + if (first == NULL) first = new; + } - printk(KERN_INFO "thread #%d range: %d ~ %d\n", thread_id, range_bound[0], range_bound[1]); - return first; + printk(KERN_INFO "thread #%d range: %d ~ %d\n", thread_id, range_bound[0], range_bound[1]); + return first; } int search_list(int thread_id, void *data, int range_bound[]) { - struct timespec localclock[2]; - struct my_node *cur = (struct my_node *) data, *tmp; - spin_lock(&my_lock); - - list_for_each_entry_safe(cur, tmp, &my_list, list) { - getrawmonotonic(&localclock[0]); - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_search,&count_search); - }; - spin_unlock(&my_lock); - return 0; + struct timespec localclock[2]; + struct my_node *cur = (struct my_node *) data, *tmp; + spin_lock(&my_lock); + + list_for_each_entry_safe(cur, tmp, &my_list, list) { + 
getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + }; + spin_unlock(&my_lock); + return 0; } -int delete_from_list(int thread_id, int range_bound[]) -{ - struct my_node *cur, *tmp; - struct timespec localclock[2]; +int delete_from_list(int thread_id, int range_bound[]) { + struct my_node *cur, *tmp; + struct timespec localclock[2]; spin_lock(&my_lock); - list_for_each_entry_safe(cur, tmp, &my_list, list) { - getrawmonotonic(&localclock[0]); - list_del(&cur->list); - kfree(cur); - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_delete, &count_delete); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); - }; + }; spin_unlock(&my_lock); - return 0; + return 0; } diff --git a/assignment10/spinlock_module/spinlock_module.c b/assignment10/spinlock_module/spinlock_module.c index 0cd296506..03f672bf3 100644 --- a/assignment10/spinlock_module/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -44,7 +44,6 @@ int __init spinlock_module_init(void) { int i; printk("Entering Spinlock Module!\n"); - //INIT_LIST_HEAD(&my_list); for (i = 0; i < NUM_THREADS; i++) { int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); @@ -56,15 +55,13 @@ int __init spinlock_module_init(void) { void __exit spinlock_module_cleanup(void) { int i; - printk(KERN_INFO"%s: Spinlock linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); printk(KERN_INFO"%s: Spinlock linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); printk(KERN_INFO"%s: Spinlock linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); - - for(i = 0; i < NUM_THREADS; i++) { + + for (i = 0; i < NUM_THREADS; i++) kthread_stop(threads[i]); - printk("thread #%d stopped!", i + 1); - } + 
printk(KERN_INFO"%s: Exiting Spinlock Module!\n", __func__); } From 5b49d1c529d5dac94212b613397b02c4dbc1fb81 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Fri, 5 Jan 2024 05:09:34 +0900 Subject: [PATCH 30/37] Refactor: deleting codes of including redundant header files --- assignment10/spinlock_module/spinlock_module.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/assignment10/spinlock_module/spinlock_module.c b/assignment10/spinlock_module/spinlock_module.c index 03f672bf3..3cd8ec175 100644 --- a/assignment10/spinlock_module/spinlock_module.c +++ b/assignment10/spinlock_module/spinlock_module.c @@ -1,17 +1,12 @@ // #--------- spinlock_module.c ---------# -#include -#include #include #include -#include #include -#include -#include -#include #include "../calclock.h" #include "../linked_list_impl.h" #define NUM_THREADS 4 + static struct task_struct *threads[NUM_THREADS]; extern unsigned long long count_insert, count_search, count_delete; @@ -22,11 +17,11 @@ void set_iter_range(int thread_id, int range_bound[2]) { range_bound[1] = range_bound[0] + 249999; } -static int work_fn(void *data) -{ +static int work_fn(void *data) { void *ret; int range_bound[2]; int thread_id = *(int*) data; + set_iter_range(thread_id, range_bound); ret = add_to_list(thread_id, range_bound); search_list(thread_id, ret, range_bound); @@ -50,11 +45,13 @@ int __init spinlock_module_init(void) { *arg = i; threads[i] = kthread_run(work_fn, (void*)arg, "thread%d", i); } + return 0; } void __exit spinlock_module_cleanup(void) { int i; + printk(KERN_INFO"%s: Spinlock linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); printk(KERN_INFO"%s: Spinlock linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); printk(KERN_INFO"%s: Spinlock linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); From b73773fa301dd3a51c492d1f37e8872923cb9176 Mon Sep 17 
00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sat, 6 Jan 2024 23:27:30 +0900 Subject: [PATCH 31/37] Feat: deleting redundant code lines of Makefile in mutex module --- assignment10/mutex_module/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/assignment10/mutex_module/Makefile b/assignment10/mutex_module/Makefile index c83215132..198e52958 100644 --- a/assignment10/mutex_module/Makefile +++ b/assignment10/mutex_module/Makefile @@ -1,8 +1,5 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o -spinlock_module_final-y := spinlock_module.o calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +obj-m := mutex_module_final.o +mutex_module_final-y := mutex_module.o linked_list_impl.o ../calclock.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From 113e7df745a6556f8fd7eb245bb7a629a2e5fed3 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sat, 6 Jan 2024 23:33:28 +0900 Subject: [PATCH 32/37] Feat: separating codes related to implementation of linked list from mutex module.c --- assignment10/mutex_module/linked_list_impl.c | 70 +++++++++++++ assignment10/mutex_module/mutex_module.c | 100 +++---------------- 2 files changed, 83 insertions(+), 87 deletions(-) create mode 100644 assignment10/mutex_module/linked_list_impl.c diff --git a/assignment10/mutex_module/linked_list_impl.c b/assignment10/mutex_module/linked_list_impl.c new file mode 100644 index 000000000..78c01b0ba --- /dev/null +++ b/assignment10/mutex_module/linked_list_impl.c @@ -0,0 +1,70 @@ +#include "../calclock.h" +#include "../linked_list_impl.h" +#include +#include + +DEFINE_MUTEX(my_lock); + +LIST_HEAD(my_list); + +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; + +void 
*add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct my_node *first = NULL; + int i; + + mutex_lock(&my_lock); + for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); + new->data = i; + + getrawmonotonic(&localclock[0]); + list_add(&new->list, &my_list); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_insert, &count_insert); + + if (first == NULL) first = new; + } + mutex_lock(&my_lock); + + printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct my_node *cur = (struct my_node *) data, *tmp; + struct timespec localclock[2]; + + mutex_lock(&my_lock); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + if(range_bound[0] <= cur->data && cur->data <= range_bound[1]) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + } + }; + mutex_unlock(&my_lock); + + return 0; +} + +int delete_from_list(int thread_id, int range_bound[2]) { + struct my_node *cur, *tmp; + struct timespec localclock[2]; + + mutex_lock(&my_lock); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); + }; + mutex_unlock(&my_lock); + + return 0; +} + diff --git a/assignment10/mutex_module/mutex_module.c b/assignment10/mutex_module/mutex_module.c index d791ea938..d05155f88 100644 --- a/assignment10/mutex_module/mutex_module.c +++ b/assignment10/mutex_module/mutex_module.c @@ -1,90 +1,16 @@ // #--------- mutex_module.c ---------# -#include -#include #include #include -#include #include #include -#include -#include -#include -#include "calclock.h" +#include "../calclock.h" +#include "../linked_list_impl.h" #define 
NUM_THREADS 4 static struct task_struct *threads[NUM_THREADS]; -struct mutex lock; - -struct node { - struct list_head list; - unsigned int data; -}; - -struct list_head list; - -unsigned long long count_insert, count_search, count_delete; -unsigned long long time_insert, time_search, time_delete; - -void *add_to_list(int thread_id, int range_bound[]) { - struct timespec localclock[2]; - struct node *first = NULL; - - int i; - mutex_lock(&lock); - for (i = range_bound[0]; i < range_bound[1] + 1; i++) { - struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); - new->data = i; - - //mutex_lock(&lock); - getrawmonotonic(&localclock[0]); - - list_add(&new->list, &list); - - getrawmonotonic(&localclock[1]); - - calclock(localclock, &time_insert, &count_insert); - //mutex_unlock(&lock); - if (first == NULL) first = new; - } - mutex_lock(&lock); - printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); - return first; -} - -int search_list(int thread_id, void *data, int range_bound[]) { - struct timespec localclock[2]; - struct node *cur = (struct node *) data, *tmp; - mutex_lock(&lock); - - list_for_each_entry_safe(cur, tmp, &list, list) { - if(range_bound[0] <= cur->data && cur->data <= range_bound[1]) { - getrawmonotonic(&localclock[0]); - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_search,&count_search); - } - }; - mutex_unlock(&lock); - return 0; -} - -int delete_from_list(int thread_id, int range_bound[2]) { - struct node *cur, *tmp; - struct timespec localclock[2]; - mutex_lock(&lock); - list_for_each_entry_safe(cur, tmp, &list, list) { - //mutex_lock(&lock); - getrawmonotonic(&localclock[0]); - list_del(&cur->list); - kfree(cur); - - getrawmonotonic(&localclock[1]); - calclock(localclock, &time_delete, &count_delete); - //mutex_unlock(&lock); - }; - mutex_unlock(&lock); - return 0; -} +extern unsigned long long count_insert, count_search, count_delete; +extern unsigned long long time_insert, time_search, 
time_delete; void set_iter_range(int thread_id, int range_bound[2]) { range_bound[0] = thread_id * 250000; @@ -95,9 +21,10 @@ static int work_fn(void *data) { int range_bound[2]; int thread_id = *(int*) data; - set_iter_range(thread_id, range_bound); + void *ret; - void *ret = add_to_list(thread_id, range_bound); + set_iter_range(thread_id, range_bound); + add_to_list(thread_id, range_bound); search_list(thread_id, ret, range_bound); delete_from_list(thread_id, range_bound); @@ -110,11 +37,10 @@ static int work_fn(void *data) } int __init mutex_module_init(void) { + int i; + printk("Entering Mutex Module!\n"); - INIT_LIST_HEAD(&list); - mutex_init(&lock); - int i; for (i = 0; i < NUM_THREADS; i++) { int* arg = (int*)kmalloc(sizeof(int), GFP_KERNEL); *arg = i; @@ -124,15 +50,15 @@ int __init mutex_module_init(void) { } void __exit mutex_module_cleanup(void) { + int i; + printk(KERN_INFO"%s: Mutex linked list insert time: %llu ms, count: %llu", __func__, time_insert, count_insert); printk(KERN_INFO"%s: Mutex linked list search time: %llu ms, count: %llu", __func__, time_search, count_search); printk(KERN_INFO"%s: Mutex linked list delete time: %llu ms, count: %llu", __func__, time_delete, count_delete); - int i; - for(i = 0; i < NUM_THREADS; i++) { + for(i = 0; i < NUM_THREADS; i++) kthread_stop(threads[i]); - printk("thread #%d stopped!", i + 1); - } + printk(KERN_INFO"%s: Exiting Mutex Module!\n", __func__); } From 2409c8f3a3789dece3dbc18d6763488a7e9de3b0 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sat, 6 Jan 2024 23:50:15 +0900 Subject: [PATCH 33/37] Feat: separating codes related to implementing linked list from rw_semaphore_module.c --- .../rw_semaphore_module/linked_list_impl.c | 71 ++++++++++++ .../rw_semaphore_module/rw_semaphore_module.c | 105 +++--------------- 2 files changed, 88 insertions(+), 88 deletions(-) create mode 100644 assignment10/rw_semaphore_module/linked_list_impl.c diff --git 
a/assignment10/rw_semaphore_module/linked_list_impl.c b/assignment10/rw_semaphore_module/linked_list_impl.c new file mode 100644 index 000000000..160d767e6 --- /dev/null +++ b/assignment10/rw_semaphore_module/linked_list_impl.c @@ -0,0 +1,71 @@ +#include "../calclock.h" +#include "../linked_list_impl.h" +#include +#include + +DEFINE_RWSEM(my_lock); + +LIST_HEAD(my_list); + +unsigned long long count_insert, count_search, count_delete; +unsigned long long time_insert, time_search, time_delete; + +void *add_to_list(int thread_id, int range_bound[]) { + struct timespec localclock[2]; + struct my_node *first = NULL; + int i; + + down_write(&my_lock); + for (i = range_bound[0]; i < range_bound[1] + 1; i++) { + struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); + new->data = i; + + getrawmonotonic(&localclock[0]); + list_add(&new->list, &my_list); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_insert, &count_insert); + + if (first == NULL) first = new; + } + up_write(&my_lock); + + printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); + + return first; +} + +int search_list(int thread_id, void *data, int range_bound[]) { + struct my_node *cur = (struct my_node *) data, *tmp; + struct timespec localclock[2]; + + down_read(&my_lock); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + if(range_bound[0] <= cur->data && cur->data <= range_bound[1]) { + getrawmonotonic(&localclock[0]); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_search,&count_search); + } + }; + up_read(&my_lock); + + return 0; +} + +int delete_from_list(int thread_id, int range_bound[2]) { + struct my_node *cur, *tmp; + struct timespec localclock[2]; + + down_write(&my_lock); + list_for_each_entry_safe(cur, tmp, &my_list, list) { + getrawmonotonic(&localclock[0]); + list_del(&cur->list); + kfree(cur); + getrawmonotonic(&localclock[1]); + calclock(localclock, &time_delete, &count_delete); + }; + up_write(&my_lock); + 
+ return 0; +} + + diff --git a/assignment10/rw_semaphore_module/rw_semaphore_module.c b/assignment10/rw_semaphore_module/rw_semaphore_module.c index 206ad3e48..a63b853ba 100644 --- a/assignment10/rw_semaphore_module/rw_semaphore_module.c +++ b/assignment10/rw_semaphore_module/rw_semaphore_module.c @@ -1,100 +1,30 @@ // #--------- rw_semaphore_module.c ---------# -#include -#include #include #include -#include #include -#include -#include -#include -#include "calclock.h" +#include "../calclock.h" +#include "../linked_list_impl.h" #define NUM_THREADS 4 static struct task_struct *threads[NUM_THREADS]; -struct rw_semaphore lock; +extern unsigned long long count_insert, count_search, count_delete; +extern unsigned long long time_insert, time_search, time_delete; -struct node { - struct list_head list; - unsigned int data; -}; - -struct list_head list; - -unsigned long long count_insert, count_search, count_delete; -unsigned long long time_insert, time_search, time_delete; - - -void *add_to_list(int thread_id, int range_bound[]) { - struct timespec localclock[2]; - struct node *first = NULL; - - int i; - down_write(&lock); - for (i = range_bound[0]; i < range_bound[1] + 1; i++) { - struct node *new = kmalloc(sizeof(struct node), GFP_KERNEL); - new->data = i; - - getrawmonotonic(&localclock[0]); - - list_add(&new->list, &list); - - getrawmonotonic(&localclock[1]); - - calclock(localclock, &time_insert, &count_insert); - if (first == NULL) first = new; - } - up_write(&lock); - - printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); - return first; -} - -int search_list(int thread_id, void *data, int range_bound[]) { - struct timespec localclock[2]; - struct node *cur = (struct node *) data, *tmp; - down_read(&lock); - - list_for_each_entry_safe(cur, tmp, &list, list) { - if (range_bound[0] <= cur->data && cur->data <= range_bound[1]) { - getrawmonotonic(&localclock[0]); - getrawmonotonic(&localclock[1]); - calclock(localclock, 
/*
 * set_iter_range - assign each worker thread its own disjoint block of
 * 250,000 consecutive values: thread t operates on
 * [t * 250000, t * 250000 + 249999] inclusive.
 */
void set_iter_range(int thread_id, int range_bound[2])
{
	const int span = 250000;

	range_bound[0] = thread_id * span;
	range_bound[1] = range_bound[0] + span - 1;
}
i++) kthread_stop(threads[i]); - printk("thread #%d stopped!", i + 1); - } - printk(KERN_INFO"%s: Exiting RW semaphore Module!\n", __func__); + + printk(KERN_INFO"%s: Exiting RW semaphore Module!\n", __func__); } module_init(rw_semaphore_module_init); From bc7d6da965e600860f65736ab66c6d28b6a5150f Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sat, 6 Jan 2024 23:51:47 +0900 Subject: [PATCH 34/37] Feat: deleting redundant codes of Makefile in rw_semaphore_module --- assignment10/rw_semaphore_module/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/assignment10/rw_semaphore_module/Makefile b/assignment10/rw_semaphore_module/Makefile index c83215132..189dbd913 100644 --- a/assignment10/rw_semaphore_module/Makefile +++ b/assignment10/rw_semaphore_module/Makefile @@ -1,8 +1,5 @@ -obj-m := compare_and_swap_module.o test_and_set_module.o fetch_and_add_module.o \ - spinlock_module_final.o mutex_module_final.o rw_semaphore_module_final.o -spinlock_module_final-y := spinlock_module.o calclock.o -mutex_module_final-y := mutex_module.o calclock.o -rw_semaphore_module_final-y := rw_semaphore_module.o calclock.o +obj-m := rw_semaphore_module_final.o +rw_semaphore_module_final-y := rw_semaphore_module.o linked_list_impl.o ../calclock.o KERNEL_DIR := /lib/modules/$(shell uname -r)/build PWD := $(shell pwd) From 099d3d52dea0115f21aa94dc3a6d3dd7d712631b Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sun, 7 Jan 2024 00:30:44 +0900 Subject: [PATCH 35/37] Fix: errors occurred because of typo --- assignment10/rw_semaphore_module/linked_list_impl.c | 6 +++++- assignment10/rw_semaphore_module/rw_semaphore_module.c | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/assignment10/rw_semaphore_module/linked_list_impl.c b/assignment10/rw_semaphore_module/linked_list_impl.c index 160d767e6..59f448eca 100644 --- a/assignment10/rw_semaphore_module/linked_list_impl.c +++ 
b/assignment10/rw_semaphore_module/linked_list_impl.c @@ -2,8 +2,11 @@ #include "../linked_list_impl.h" #include #include +#include -DEFINE_RWSEM(my_lock); +//int counter; +//struct rw_semaphore my_lock; +DECLARE_RWSEM(my_lock); LIST_HEAD(my_list); @@ -16,6 +19,7 @@ void *add_to_list(int thread_id, int range_bound[]) { int i; down_write(&my_lock); +// counter++; for (i = range_bound[0]; i < range_bound[1] + 1; i++) { struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); new->data = i; diff --git a/assignment10/rw_semaphore_module/rw_semaphore_module.c b/assignment10/rw_semaphore_module/rw_semaphore_module.c index a63b853ba..053909bf8 100644 --- a/assignment10/rw_semaphore_module/rw_semaphore_module.c +++ b/assignment10/rw_semaphore_module/rw_semaphore_module.c @@ -10,7 +10,7 @@ static struct task_struct *threads[NUM_THREADS]; extern unsigned long long count_insert, count_search, count_delete; extern unsigned long long time_insert, time_search, time_delete; - +//extern struct rw_semaphore my_lock; void set_iter_range(int thread_id, int range_bound[2]) { range_bound[0] = thread_id * 250000; @@ -39,6 +39,7 @@ static int work_fn(void *data) int __init rw_semaphore_module_init(void) { int i; +// init_rwsem(&my_lock); printk("Entering RW Semaphore Module!\n"); for (i = 0; i < NUM_THREADS; i++) { From c487839483d7ee08b8dd552e99734e87cf59768c Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sun, 7 Jan 2024 00:32:47 +0900 Subject: [PATCH 36/37] Refactor: deleting redundant codes in rw semaphore module files --- assignment10/rw_semaphore_module/linked_list_impl.c | 4 ---- assignment10/rw_semaphore_module/rw_semaphore_module.c | 5 +---- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/assignment10/rw_semaphore_module/linked_list_impl.c b/assignment10/rw_semaphore_module/linked_list_impl.c index 59f448eca..bc704bfc4 100644 --- a/assignment10/rw_semaphore_module/linked_list_impl.c +++ 
b/assignment10/rw_semaphore_module/linked_list_impl.c @@ -2,10 +2,7 @@ #include "../linked_list_impl.h" #include #include -#include -//int counter; -//struct rw_semaphore my_lock; DECLARE_RWSEM(my_lock); LIST_HEAD(my_list); @@ -19,7 +16,6 @@ void *add_to_list(int thread_id, int range_bound[]) { int i; down_write(&my_lock); -// counter++; for (i = range_bound[0]; i < range_bound[1] + 1; i++) { struct my_node *new = kmalloc(sizeof(struct my_node), GFP_KERNEL); new->data = i; diff --git a/assignment10/rw_semaphore_module/rw_semaphore_module.c b/assignment10/rw_semaphore_module/rw_semaphore_module.c index 053909bf8..51c2dd2fa 100644 --- a/assignment10/rw_semaphore_module/rw_semaphore_module.c +++ b/assignment10/rw_semaphore_module/rw_semaphore_module.c @@ -10,15 +10,13 @@ static struct task_struct *threads[NUM_THREADS]; extern unsigned long long count_insert, count_search, count_delete; extern unsigned long long time_insert, time_search, time_delete; -//extern struct rw_semaphore my_lock; void set_iter_range(int thread_id, int range_bound[2]) { range_bound[0] = thread_id * 250000; range_bound[1] = range_bound[0] + 249999; } -static int work_fn(void *data) -{ +static int work_fn(void *data) { int range_bound[2]; int thread_id = *(int*) data; void *ret; @@ -39,7 +37,6 @@ static int work_fn(void *data) int __init rw_semaphore_module_init(void) { int i; -// init_rwsem(&my_lock); printk("Entering RW Semaphore Module!\n"); for (i = 0; i < NUM_THREADS; i++) { From cdd4ea3539582ec19940a06087f0ef2d84ae2fd3 Mon Sep 17 00:00:00 2001 From: jimin-kiim <0305jimin@naver.com> Date: Sun, 7 Jan 2024 00:48:38 +0900 Subject: [PATCH 37/37] Fix: tasks being blocked for too long because of typo --- assignment10/mutex_module/linked_list_impl.c | 2 +- assignment10/mutex_module/mutex_module.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/assignment10/mutex_module/linked_list_impl.c b/assignment10/mutex_module/linked_list_impl.c index 78c01b0ba..6f4127886 100644 --- 
a/assignment10/mutex_module/linked_list_impl.c +++ b/assignment10/mutex_module/linked_list_impl.c @@ -27,7 +27,7 @@ void *add_to_list(int thread_id, int range_bound[]) { if (first == NULL) first = new; } - mutex_lock(&my_lock); + mutex_unlock(&my_lock); printk(KERN_INFO "thread #%d range: %d ~ %d\n",thread_id, range_bound[0], range_bound[1]); diff --git a/assignment10/mutex_module/mutex_module.c b/assignment10/mutex_module/mutex_module.c index d05155f88..3c7311120 100644 --- a/assignment10/mutex_module/mutex_module.c +++ b/assignment10/mutex_module/mutex_module.c @@ -2,7 +2,6 @@ #include #include #include -#include #include "../calclock.h" #include "../linked_list_impl.h" @@ -24,7 +23,7 @@ static int work_fn(void *data) void *ret; set_iter_range(thread_id, range_bound); - add_to_list(thread_id, range_bound); + ret = add_to_list(thread_id, range_bound); search_list(thread_id, ret, range_bound); delete_from_list(thread_id, range_bound);