...
 {
 	unsigned long i;
 
-	mb();
+	mb(); /* Full memory barrier used before CLFLUSH so that it is ordered */
 	for (i = 0; i < num_pages; i++)
 		drm_clflush_page(*pages++);
-	mb();
+	mb(); /* Also used after CLFLUSH so that all cache lines are flushed */
 }
 #endif
 
...
 
 #elif defined(__powerpc__)
 	unsigned long i;
+
 	for (i = 0; i < num_pages; i++) {
 		struct page *page = pages[i];
 		void *page_virtual;
...
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		struct sg_page_iter sg_iter;
 
-		mb();
-		for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
+		mb(); /* CLFLUSH is ordered only by memory barriers */
+		for_each_sgtable_page(st, &sg_iter, 0)
 			drm_clflush_page(sg_page_iter_page(&sg_iter));
-		mb();
+		mb(); /* Make sure that all cache line entries are flushed */
 
 		return;
 	}
...
 	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		const int size = boot_cpu_data.x86_clflush_size;
 		void *end = addr + length;
+
 		addr = (void *)(((unsigned long)addr) & -size);
-		mb();
+		mb(); /* CLFLUSH is only ordered with a full memory barrier */
 		for (; addr < end; addr += size)
 			clflushopt(addr);
 		clflushopt(end - 1); /* force serialisation */
-		mb();
+		mb(); /* Ensure that every data cache line entry is flushed */
 		return;
 	}
 