2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/gpu/drm/drm_cache.c
@@ -62,10 +62,10 @@
 {
 	unsigned long i;
 
-	mb();
+	mb(); /* Full memory barrier before CLFLUSH so that the flushes are ordered */
 	for (i = 0; i < num_pages; i++)
 		drm_clflush_page(*pages++);
-	mb();
+	mb(); /* Also needed after CLFLUSH so that all cache lines are flushed */
 }
 #endif
 
@@ -92,6 +92,7 @@
 
 #elif defined(__powerpc__)
 	unsigned long i;
+
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = pages[i];
 		void *page_virtual;
@@ -125,10 +126,10 @@
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
-		mb();
-		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		mb(); /* CLFLUSH is only ordered by memory barriers */
+		for_each_sgtable_page(st, &sg_iter, 0)
 			drm_clflush_page(sg_page_iter_page(&sg_iter));
-		mb();
+		mb(); /* Make sure that every cache line entry is flushed */
 
 		return;
 	}
@@ -157,12 +158,13 @@
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
+
 		addr = (void *)(((unsigned long)addr) & -size);
-		mb();
+		mb(); /* CLFLUSH is only ordered with a full memory barrier */
 		for (; addr < end; addr += size)
 			clflushopt(addr);
 		clflushopt(end - 1); /* force serialisation */
-		mb();
+		mb(); /* Ensure that every data cache line entry is flushed */
 		return;
 	}
 