.. | .. |
---|
56 | 56 | u32 index, int offset, struct bio *bio); |
---|
57 | 57 | |
---|
58 | 58 | |
---|
| 59 | +#ifdef CONFIG_PREEMPT_RT_BASE /* RT: real per-slot spinlock instead of bit spinlock */ |
---|
| 60 | +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) |
---|
| 61 | +{ |
---|
| 62 | + size_t index; |
---|
| 63 | + |
---|
| 64 | + for (index = 0; index < num_pages; index++) |
---|
| 65 | + spin_lock_init(&zram->table[index].lock); |
---|
| 66 | +} |
---|
| 67 | + |
---|
| 68 | +static int zram_slot_trylock(struct zram *zram, u32 index) |
---|
| 69 | +{ |
---|
| 70 | + int ret; |
---|
| 71 | + |
---|
| 72 | + ret = spin_trylock(&zram->table[index].lock); |
---|
| 73 | + if (ret) |
---|
| 74 | + __set_bit(ZRAM_LOCK, &zram->table[index].flags); /* mirror lock state in ZRAM_LOCK bit */ |
---|
| 75 | + return ret; |
---|
| 76 | +} |
---|
| 77 | + |
---|
| 78 | +static void zram_slot_lock(struct zram *zram, u32 index) |
---|
| 79 | +{ |
---|
| 80 | + spin_lock(&zram->table[index].lock); |
---|
| 81 | + __set_bit(ZRAM_LOCK, &zram->table[index].flags); |
---|
| 82 | +} |
---|
| 83 | + |
---|
| 84 | +static void zram_slot_unlock(struct zram *zram, u32 index) |
---|
| 85 | +{ |
---|
| 86 | + __clear_bit(ZRAM_LOCK, &zram->table[index].flags); /* clear bit before dropping the lock */ |
---|
| 87 | + spin_unlock(&zram->table[index].lock); |
---|
| 88 | +} |
---|
| 89 | + |
---|
| 90 | +#else |
---|
| 91 | +static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { } |
---|
| 92 | + |
---|
59 | 93 | static int zram_slot_trylock(struct zram *zram, u32 index) |
---|
60 | 94 | { |
---|
61 | 95 | return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags); |
---|
.. | .. |
---|
70 | 104 | { |
---|
71 | 105 | bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags); |
---|
72 | 106 | } |
---|
| 107 | +#endif |
---|
73 | 108 | |
---|
74 | 109 | static inline bool init_done(struct zram *zram) |
---|
75 | 110 | { |
---|
.. | .. |
---|
1145 | 1180 | #endif |
---|
1146 | 1181 | static DEVICE_ATTR_RO(debug_stat); |
---|
1147 | 1182 | |
---|
| 1183 | + |
---|
| 1184 | + |
---|
1148 | 1185 | static void zram_meta_free(struct zram *zram, u64 disksize) |
---|
1149 | 1186 | { |
---|
1150 | 1187 | size_t num_pages = disksize >> PAGE_SHIFT; |
---|
.. | .. |
---|
1175 | 1212 | |
---|
1176 | 1213 | if (!huge_class_size) |
---|
1177 | 1214 | huge_class_size = zs_huge_class_size(zram->mem_pool); |
---|
| 1215 | + zram_meta_init_table_locks(zram, num_pages); |
---|
1178 | 1216 | return true; |
---|
1179 | 1217 | } |
---|
1180 | 1218 | |
---|
.. | .. |
---|
1237 | 1275 | unsigned long handle; |
---|
1238 | 1276 | unsigned int size; |
---|
1239 | 1277 | void *src, *dst; |
---|
| 1278 | + struct zcomp_strm *zstrm; |
---|
1240 | 1279 | |
---|
1241 | 1280 | zram_slot_lock(zram, index); |
---|
1242 | 1281 | if (zram_test_flag(zram, index, ZRAM_WB)) { |
---|
.. | .. |
---|
1267 | 1306 | |
---|
1268 | 1307 | size = zram_get_obj_size(zram, index); |
---|
1269 | 1308 | |
---|
| 1309 | + zstrm = zcomp_stream_get(zram->comp); |
---|
1270 | 1310 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
---|
1271 | 1311 | if (size == PAGE_SIZE) { |
---|
1272 | 1312 | dst = kmap_atomic(page); |
---|
.. | .. |
---|
1274 | 1314 | kunmap_atomic(dst); |
---|
1275 | 1315 | ret = 0; |
---|
1276 | 1316 | } else { |
---|
1277 | | - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); |
---|
1278 | 1317 | |
---|
1279 | 1318 | dst = kmap_atomic(page); |
---|
1280 | 1319 | ret = zcomp_decompress(zstrm, src, size, dst); |
---|
1281 | 1320 | kunmap_atomic(dst); |
---|
1282 | | - zcomp_stream_put(zram->comp); |
---|
1283 | 1321 | } |
---|
1284 | 1322 | zs_unmap_object(zram->mem_pool, handle); |
---|
| 1323 | + zcomp_stream_put(zram->comp); |
---|
1285 | 1324 | zram_slot_unlock(zram, index); |
---|
1286 | 1325 | |
---|
1287 | 1326 | /* Should NEVER happen. Return bio error if it does. */ |
---|