/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>

#include "zcomp.h"
21 static const char * const backends[] = {
24 #if IS_ENABLED(CONFIG_CRYPTO_LZ4)
27 #if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
30 #if IS_ENABLED(CONFIG_CRYPTO_842)
33 #if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
39 static void zcomp_strm_free(struct zcomp_strm *zstrm)
41 if (!IS_ERR_OR_NULL(zstrm->tfm))
42 crypto_free_comp(zstrm->tfm);
43 free_pages((unsigned long)zstrm->buffer, 1);
48 * allocate new zcomp_strm structure with ->tfm initialized by
49 * backend, return NULL on error
51 static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
53 struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
57 zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
59 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
60 * case when compressed size is larger than the original one
62 zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
63 if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
64 zcomp_strm_free(zstrm);
70 bool zcomp_available_algorithm(const char *comp)
74 i = __sysfs_match_string(backends, -1, comp);
79 * Crypto does not ignore a trailing new line symbol,
80 * so make sure you don't supply a string containing
82 * This also means that we permit zcomp initialisation
83 * with any compressing algorithm known to crypto api.
85 return crypto_has_comp(comp, 0, 0) == 1;
88 /* show available compressors */
89 ssize_t zcomp_available_show(const char *comp, char *buf)
91 bool known_algorithm = false;
95 for (; backends[i]; i++) {
96 if (!strcmp(comp, backends[i])) {
97 known_algorithm = true;
98 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
99 "[%s] ", backends[i]);
101 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
107 * Out-of-tree module known to crypto api or a missing
108 * entry in `backends'.
110 if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
111 sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
114 sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
118 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
120 return *get_cpu_ptr(comp->stream);
123 void zcomp_stream_put(struct zcomp *comp)
125 put_cpu_ptr(comp->stream);
128 int zcomp_compress(struct zcomp_strm *zstrm,
129 const void *src, unsigned int *dst_len)
132 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
133 * because sometimes we can endup having a bigger compressed data
134 * due to various reasons: for example compression algorithms tend
135 * to add some padding to the compressed buffer. Speaking of padding,
136 * comp algorithm `842' pads the compressed length to multiple of 8
137 * and returns -ENOSP when the dst memory is not big enough, which
138 * is not something that ZRAM wants to see. We can handle the
139 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we
140 * receive -ERRNO from the compressing backend we can't help it
141 * anymore. To make `842' happy we need to tell the exact size of
142 * the dst buffer, zram_drv will take care of the fact that
143 * compressed buffer is too big.
145 *dst_len = PAGE_SIZE * 2;
147 return crypto_comp_compress(zstrm->tfm,
149 zstrm->buffer, dst_len);
152 int zcomp_decompress(struct zcomp_strm *zstrm,
153 const void *src, unsigned int src_len, void *dst)
155 unsigned int dst_len = PAGE_SIZE;
157 return crypto_comp_decompress(zstrm->tfm,
162 int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
164 struct zcomp *comp = hlist_entry(node, struct zcomp, node);
165 struct zcomp_strm *zstrm;
167 if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
170 zstrm = zcomp_strm_alloc(comp);
171 if (IS_ERR_OR_NULL(zstrm)) {
172 pr_err("Can't allocate a compression stream\n");
175 *per_cpu_ptr(comp->stream, cpu) = zstrm;
179 int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
181 struct zcomp *comp = hlist_entry(node, struct zcomp, node);
182 struct zcomp_strm *zstrm;
184 zstrm = *per_cpu_ptr(comp->stream, cpu);
185 if (!IS_ERR_OR_NULL(zstrm))
186 zcomp_strm_free(zstrm);
187 *per_cpu_ptr(comp->stream, cpu) = NULL;
191 static int zcomp_init(struct zcomp *comp)
195 comp->stream = alloc_percpu(struct zcomp_strm *);
199 ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
205 free_percpu(comp->stream);
209 void zcomp_destroy(struct zcomp *comp)
211 cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
212 free_percpu(comp->stream);
217 * search available compressors for requested algorithm.
218 * allocate new zcomp and initialize it. return compressing
219 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
220 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
221 * case of allocation error, or any other error potentially
222 * returned by zcomp_init().
224 struct zcomp *zcomp_create(const char *compress)
229 if (!zcomp_available_algorithm(compress))
230 return ERR_PTR(-EINVAL);
232 comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
234 return ERR_PTR(-ENOMEM);
236 comp->name = compress;
237 error = zcomp_init(comp);
240 return ERR_PTR(error);