From bf3afd5f419a2054bf03d963bdd223fbb27b72d2 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Wed, 8 Jun 2016 14:55:26 -0400 Subject: qdist: add module to represent frequency distributions of data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes it is useful to have a quick histogram to represent a certain distribution -- for example, when investigating a performance regression in a hash table due to inadequate hashing. The appended allows us to easily represent a distribution using Unicode characters. Further, the data structure keeping track of the distribution is so simple that obtaining its values for off-line processing is trivial. Example, taking the last 10 commits to QEMU: Characters in commit title Count ----------------------------------- 39 1 48 1 53 1 54 2 57 1 61 1 67 1 78 1 80 1 qdist_init(&dist); qdist_inc(&dist, 39); [...] qdist_inc(&dist, 80); char *str = qdist_pr(&dist, 9, QDIST_PR_LABELS); // -> [39.0,43.6)▂▂ █▂ ▂ ▄[75.4,80.0] g_free(str); char *str = qdist_pr(&dist, 4, QDIST_PR_LABELS); // -> [39.0,49.2)▁█▁▁[69.8,80.0] g_free(str); Reviewed-by: Richard Henderson Signed-off-by: Emilio G. Cota Message-Id: <1465412133-3029-9-git-send-email-cota@braap.org> Signed-off-by: Richard Henderson --- util/Makefile.objs | 1 + util/qdist.c | 395 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 396 insertions(+) create mode 100644 util/qdist.c (limited to 'util') diff --git a/util/Makefile.objs b/util/Makefile.objs index a8a777ec40..702435e839 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -32,3 +32,4 @@ util-obj-y += buffer.o util-obj-y += timed-average.o util-obj-y += base64.o util-obj-y += log.o +util-obj-y += qdist.o diff --git a/util/qdist.c b/util/qdist.c new file mode 100644 index 0000000000..4ea2e34fc2 --- /dev/null +++ b/util/qdist.c @@ -0,0 +1,395 @@ +/* + * qdist.c - QEMU helpers for handling frequency distributions of data. + * + * Copyright (C) 2016, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/qdist.h" + +#include +#ifndef NAN +#define NAN (0.0 / 0.0) +#endif + +void qdist_init(struct qdist *dist) +{ + dist->entries = g_malloc(sizeof(*dist->entries)); + dist->size = 1; + dist->n = 0; +} + +void qdist_destroy(struct qdist *dist) +{ + g_free(dist->entries); +} + +static inline int qdist_cmp_double(double a, double b) +{ + if (a > b) { + return 1; + } else if (a < b) { + return -1; + } + return 0; +} + +static int qdist_cmp(const void *ap, const void *bp) +{ + const struct qdist_entry *a = ap; + const struct qdist_entry *b = bp; + + return qdist_cmp_double(a->x, b->x); +} + +void qdist_add(struct qdist *dist, double x, long count) +{ + struct qdist_entry *entry = NULL; + + if (dist->n) { + struct qdist_entry e; + + e.x = x; + entry = bsearch(&e, dist->entries, dist->n, sizeof(e), qdist_cmp); + } + + if (entry) { + entry->count += count; + return; + } + + if (unlikely(dist->n == dist->size)) { + dist->size *= 2; + dist->entries = g_realloc(dist->entries, + sizeof(*dist->entries) * (dist->size)); + } + dist->n++; + entry = &dist->entries[dist->n - 1]; + entry->x = x; + entry->count = count; + qsort(dist->entries, dist->n, sizeof(*entry), qdist_cmp); +} + +void qdist_inc(struct qdist *dist, double x) +{ + qdist_add(dist, x, 1); +} + +/* + * Unicode for block elements. 
See: + * https://en.wikipedia.org/wiki/Block_Elements + */ +static const gunichar qdist_blocks[] = { + 0x2581, + 0x2582, + 0x2583, + 0x2584, + 0x2585, + 0x2586, + 0x2587, + 0x2588 +}; + +#define QDIST_NR_BLOCK_CODES ARRAY_SIZE(qdist_blocks) + +/* + * Print a distribution into a string. + * + * This function assumes that appropriate binning has been done on the input; + * see qdist_bin__internal() and qdist_pr_plain(). + * + * Callers must free the returned string with g_free(). + */ +static char *qdist_pr_internal(const struct qdist *dist) +{ + double min, max; + GString *s = g_string_new(""); + size_t i; + + /* if only one entry, its printout will be either full or empty */ + if (dist->n == 1) { + if (dist->entries[0].count) { + g_string_append_unichar(s, qdist_blocks[QDIST_NR_BLOCK_CODES - 1]); + } else { + g_string_append_c(s, ' '); + } + goto out; + } + + /* get min and max counts */ + min = dist->entries[0].count; + max = min; + for (i = 0; i < dist->n; i++) { + struct qdist_entry *e = &dist->entries[i]; + + if (e->count < min) { + min = e->count; + } + if (e->count > max) { + max = e->count; + } + } + + for (i = 0; i < dist->n; i++) { + struct qdist_entry *e = &dist->entries[i]; + int index; + + /* make an exception with 0; instead of using block[0], print a space */ + if (e->count) { + /* divide first to avoid loss of precision when e->count == max */ + index = (e->count - min) / (max - min) * (QDIST_NR_BLOCK_CODES - 1); + g_string_append_unichar(s, qdist_blocks[index]); + } else { + g_string_append_c(s, ' '); + } + } + out: + return g_string_free(s, FALSE); +} + +/* + * Bin the distribution in @from into @n bins of consecutive, non-overlapping + * intervals, copying the result to @to. + * + * This function is internal to qdist: only this file and test code should + * ever call it. + * + * Note: calling this function on an already-binned qdist is a bug. + * + * If @n == 0 or @from->n == 1, use @from->n. + */ +void qdist_bin__internal(struct qdist *to, const struct qdist *from, size_t n) +{ + double xmin, xmax; + double step; + size_t i, j; + + qdist_init(to); + + if (from->n == 0) { + return; + } + if (n == 0 || from->n == 1) { + n = from->n; + } + + /* set equally-sized bins between @from's left and right */ + xmin = qdist_xmin(from); + xmax = qdist_xmax(from); + step = (xmax - xmin) / n; + + if (n == from->n) { + /* if @from's entries are equally spaced, no need to re-bin */ + for (i = 0; i < from->n; i++) { + if (from->entries[i].x != xmin + i * step) { + goto rebin; + } + } + /* they're equally spaced, so copy the dist and bail out */ + to->entries = g_new(struct qdist_entry, from->n); + to->n = from->n; + memcpy(to->entries, from->entries, sizeof(*to->entries) * to->n); + return; + } + + rebin: + j = 0; + for (i = 0; i < n; i++) { + double x; + double left, right; + + left = xmin + i * step; + right = xmin + (i + 1) * step; + + /* Add x, even if it might not get any counts later */ + x = left; + qdist_add(to, x, 0); + + /* + * To avoid double-counting we capture [left, right) ranges, except for + * the righmost bin, which captures a [left, right] range. + */ + while (j < from->n && (from->entries[j].x < right || i == n - 1)) { + struct qdist_entry *o = &from->entries[j]; + + qdist_add(to, x, o->count); + j++; + } + } +} + +/* + * Print @dist into a string, after re-binning it into @n bins of consecutive, + * non-overlapping intervals. + * + * If @n == 0, use @orig->n. + * + * Callers must free the returned string with g_free(). 
+ */ +char *qdist_pr_plain(const struct qdist *dist, size_t n) +{ + struct qdist binned; + char *ret; + + if (dist->n == 0) { + return NULL; + } + qdist_bin__internal(&binned, dist, n); + ret = qdist_pr_internal(&binned); + qdist_destroy(&binned); + return ret; +} + +static char *qdist_pr_label(const struct qdist *dist, size_t n_bins, + uint32_t opt, bool is_left) +{ + const char *percent; + const char *lparen; + const char *rparen; + GString *s; + double x1, x2, step; + double x; + double n; + int dec; + + s = g_string_new(""); + if (!(opt & QDIST_PR_LABELS)) { + goto out; + } + + dec = opt & QDIST_PR_NODECIMAL ? 0 : 1; + percent = opt & QDIST_PR_PERCENT ? "%" : ""; + + n = n_bins ? n_bins : dist->n; + x = is_left ? qdist_xmin(dist) : qdist_xmax(dist); + step = (qdist_xmax(dist) - qdist_xmin(dist)) / n; + + if (opt & QDIST_PR_100X) { + x *= 100.0; + step *= 100.0; + } + if (opt & QDIST_PR_NOBINRANGE) { + lparen = rparen = ""; + x1 = x; + x2 = x; /* unnecessary, but a dumb compiler might not figure it out */ + } else { + lparen = "["; + rparen = is_left ? ")" : "]"; + if (is_left) { + x1 = x; + x2 = x + step; + } else { + x1 = x - step; + x2 = x; + } + } + g_string_append_printf(s, "%s%.*f", lparen, dec, x1); + if (!(opt & QDIST_PR_NOBINRANGE)) { + g_string_append_printf(s, ",%.*f%s", dec, x2, rparen); + } + g_string_append(s, percent); + out: + return g_string_free(s, FALSE); +} + +/* + * Print the distribution's histogram into a string. + * + * See also: qdist_pr_plain(). + * + * Callers must free the returned string with g_free(). + */ +char *qdist_pr(const struct qdist *dist, size_t n_bins, uint32_t opt) +{ + const char *border = opt & QDIST_PR_BORDER ? "|" : ""; + char *llabel, *rlabel; + char *hgram; + GString *s; + + if (dist->n == 0) { + return NULL; + } + + s = g_string_new(""); + + llabel = qdist_pr_label(dist, n_bins, opt, true); + rlabel = qdist_pr_label(dist, n_bins, opt, false); + hgram = qdist_pr_plain(dist, n_bins); + g_string_append_printf(s, "%s%s%s%s%s", + llabel, border, hgram, border, rlabel); + g_free(llabel); + g_free(rlabel); + g_free(hgram); + + return g_string_free(s, FALSE); +} + +static inline double qdist_x(const struct qdist *dist, int index) +{ + if (dist->n == 0) { + return NAN; + } + return dist->entries[index].x; +} + +double qdist_xmin(const struct qdist *dist) +{ + return qdist_x(dist, 0); +} + +double qdist_xmax(const struct qdist *dist) +{ + return qdist_x(dist, dist->n - 1); +} + +size_t qdist_unique_entries(const struct qdist *dist) +{ + return dist->n; +} + +unsigned long qdist_sample_count(const struct qdist *dist) +{ + unsigned long count = 0; + size_t i; + + for (i = 0; i < dist->n; i++) { + struct qdist_entry *e = &dist->entries[i]; + + count += e->count; + } + return count; +} + +static double qdist_pairwise_avg(const struct qdist *dist, size_t index, + size_t n, unsigned long count) +{ + /* amortize the recursion by using a base case > 2 */ + if (n <= 8) { + size_t i; + double ret = 0; + + for (i = 0; i < n; i++) { + struct qdist_entry *e = &dist->entries[index + i]; + + ret += e->x * e->count / count; + } + return ret; + } else { + size_t n2 = n / 2; + + return qdist_pairwise_avg(dist, index, n2, count) + + qdist_pairwise_avg(dist, index + n2, n - n2, count); + } +} + +double qdist_avg(const struct qdist *dist) +{ + unsigned long count; + + count = qdist_sample_count(dist); + if (!count) { + return NAN; + } + return qdist_pairwise_avg(dist, 0, dist->n, count); +} -- cgit v1.2.1 From 2e11264aafe476c7a53accde4a23cfc2395a02fd Mon Sep 17 00:00:00 
2001 From: "Emilio G. Cota" Date: Wed, 8 Jun 2016 14:55:28 -0400 Subject: qht: QEMU's fast, resizable and scalable Hash Table This is a fast, scalable chained hash table with optional auto-resizing, allowing reads that are concurrent with reads, and reads/writes that are concurrent with writes to separate buckets. A hash table with these features will be necessary for the scalability of the ongoing MTTCG work; before those changes arrive we can already benefit from the single-threaded speedup that qht also provides. Signed-off-by: Emilio G. Cota Message-Id: <1465412133-3029-11-git-send-email-cota@braap.org> Signed-off-by: Richard Henderson --- util/Makefile.objs | 1 + util/qht.c | 833 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 834 insertions(+) create mode 100644 util/qht.c (limited to 'util') diff --git a/util/Makefile.objs b/util/Makefile.objs index 702435e839..45f8794864 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -33,3 +33,4 @@ util-obj-y += timed-average.o util-obj-y += base64.o util-obj-y += log.o util-obj-y += qdist.o +util-obj-y += qht.o diff --git a/util/qht.c b/util/qht.c new file mode 100644 index 0000000000..6f749098f4 --- /dev/null +++ b/util/qht.c @@ -0,0 +1,833 @@ +/* + * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads. + * + * Copyright (C) 2016, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * Assumptions: + * - NULL cannot be inserted/removed as a pointer value. + * - Trying to insert an already-existing hash-pointer pair is OK. However, + * it is not OK to insert into the same hash table different hash-pointer + * pairs that have the same pointer value, but not the hashes. + * - Lookups are performed under an RCU read-critical section; removals + * must wait for a grace period to elapse before freeing removed objects. + * + * Features: + * - Reads (i.e. lookups and iterators) can be concurrent with other reads. + * Lookups that are concurrent with writes to the same bucket will retry + * via a seqlock; iterators acquire all bucket locks and therefore can be + * concurrent with lookups and are serialized wrt writers. + * - Writes (i.e. insertions/removals) can be concurrent with writes to + * different buckets; writes to the same bucket are serialized through a lock. + * - Optional auto-resizing: the hash table resizes up if the load surpasses + * a certain threshold. Resizing is done concurrently with readers; writes + * are serialized with the resize operation. + * + * The key structure is the bucket, which is cacheline-sized. Buckets + * contain a few hash values and pointers; the u32 hash values are stored in + * full so that resizing is fast. Having this structure instead of directly + * chaining items has two advantages: + * - Failed lookups fail fast, and touch a minimum number of cache lines. + * - Resizing the hash table with concurrent lookups is easy. + * + * There are two types of buckets: + * 1. "head" buckets are the ones allocated in the array of buckets in qht_map. + * 2. all "non-head" buckets (i.e. all others) are members of a chain that + * starts from a head bucket. + * Note that the seqlock and spinlock of a head bucket applies to all buckets + * chained to it; these two fields are unused in non-head buckets. + * + * On removals, we move the last valid item in the chain to the position of the + * just-removed entry. 
This makes lookups slightly faster, since the moment an + * invalid entry is found, the (failed) lookup is over. + * + * Resizing is done by taking all bucket spinlocks (so that no other writers can + * race with us) and then copying all entries into a new hash map. Then, the + * ht->map pointer is set, and the old map is freed once no RCU readers can see + * it anymore. + * + * Writers check for concurrent resizes by comparing ht->map before and after + * acquiring their bucket lock. If they don't match, a resize has occured + * while the bucket spinlock was being acquired. + * + * Related Work: + * - Idea of cacheline-sized buckets with full hashes taken from: + * David, Guerraoui & Trigonakis, "Asynchronized Concurrency: + * The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15. + * - Why not RCU-based hash tables? They would allow us to get rid of the + * seqlock, but resizing would take forever since RCU read critical + * sections in QEMU take quite a long time. + * More info on relativistic hash tables: + * + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash + * Tables via Relativistic Programming", USENIX ATC'11. + * + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014. + * https://lwn.net/Articles/612021/ + */ +#include "qemu/qht.h" +#include "qemu/atomic.h" +#include "qemu/rcu.h" + +//#define QHT_DEBUG + +/* + * We want to avoid false sharing of cache lines. Most systems have 64-byte + * cache lines so we go with it for simplicity. + * + * Note that systems with smaller cache lines will be fine (the struct is + * almost 64-bytes); systems with larger cache lines might suffer from + * some false sharing. + */ +#define QHT_BUCKET_ALIGN 64 + +/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */ +#if HOST_LONG_BITS == 32 +#define QHT_BUCKET_ENTRIES 6 +#else /* 64-bit */ +#define QHT_BUCKET_ENTRIES 4 +#endif + +/* + * Note: reading partially-updated pointers in @pointers could lead to + * segfaults. We thus access them with atomic_read/set; this guarantees + * that the compiler makes all those accesses atomic. We also need the + * volatile-like behavior in atomic_read, since otherwise the compiler + * might refetch the pointer. + * atomic_read's are of course not necessary when the bucket lock is held. + * + * If both ht->lock and b->lock are grabbed, ht->lock should always + * be grabbed first. + */ +struct qht_bucket { + QemuSpin lock; + QemuSeqLock sequence; + uint32_t hashes[QHT_BUCKET_ENTRIES]; + void *pointers[QHT_BUCKET_ENTRIES]; + struct qht_bucket *next; +} QEMU_ALIGNED(QHT_BUCKET_ALIGN); + +QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN); + +/** + * struct qht_map - structure to track an array of buckets + * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind + * find the whole struct. + * @buckets: array of head buckets. It is constant once the map is created. + * @n_buckets: number of head buckets. It is constant once the map is created. + * @n_added_buckets: number of added (i.e. "non-head") buckets + * @n_added_buckets_threshold: threshold to trigger an upward resize once the + * number of added buckets surpasses it. + * + * Buckets are tracked in what we call a "map", i.e. this structure. 
+ */ +struct qht_map { + struct rcu_head rcu; + struct qht_bucket *buckets; + size_t n_buckets; + size_t n_added_buckets; + size_t n_added_buckets_threshold; +}; + +/* trigger a resize when n_added_buckets > n_buckets / div */ +#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8 + +static void qht_do_resize(struct qht *ht, struct qht_map *new); +static void qht_grow_maybe(struct qht *ht); + +#ifdef QHT_DEBUG + +#define qht_debug_assert(X) do { assert(X); } while (0) + +static void qht_bucket_debug__locked(struct qht_bucket *b) +{ + bool seen_empty = false; + bool corrupt = false; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i] == NULL) { + seen_empty = true; + continue; + } + if (seen_empty) { + fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n", + __func__, b, i, b->hashes[i], b->pointers[i]); + corrupt = true; + } + } + b = b->next; + } while (b); + qht_debug_assert(!corrupt); +} + +static void qht_map_debug__all_locked(struct qht_map *map) +{ + int i; + + for (i = 0; i < map->n_buckets; i++) { + qht_bucket_debug__locked(&map->buckets[i]); + } +} +#else + +#define qht_debug_assert(X) do { (void)(X); } while (0) + +static inline void qht_bucket_debug__locked(struct qht_bucket *b) +{ } + +static inline void qht_map_debug__all_locked(struct qht_map *map) +{ } +#endif /* QHT_DEBUG */ + +static inline size_t qht_elems_to_buckets(size_t n_elems) +{ + return pow2ceil(n_elems / QHT_BUCKET_ENTRIES); +} + +static inline void qht_head_init(struct qht_bucket *b) +{ + memset(b, 0, sizeof(*b)); + qemu_spin_init(&b->lock); + seqlock_init(&b->sequence); +} + +static inline +struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash) +{ + return &map->buckets[hash & (map->n_buckets - 1)]; +} + +/* acquire all bucket locks from a map */ +static void qht_map_lock_buckets(struct qht_map *map) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + struct qht_bucket *b = &map->buckets[i]; + + qemu_spin_lock(&b->lock); + } +} + +static void qht_map_unlock_buckets(struct qht_map *map) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + struct qht_bucket *b = &map->buckets[i]; + + qemu_spin_unlock(&b->lock); + } +} + +/* + * Call with at least a bucket lock held. + * @map should be the value read before acquiring the lock (or locks). + */ +static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map) +{ + return map != ht->map; +} + +/* + * Grab all bucket locks, and set @pmap after making sure the map isn't stale. + * + * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference. + * + * Note: callers cannot have ht->lock held. + */ +static inline +void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap) +{ + struct qht_map *map; + + map = atomic_rcu_read(&ht->map); + qht_map_lock_buckets(map); + if (likely(!qht_map_is_stale__locked(ht, map))) { + *pmap = map; + return; + } + qht_map_unlock_buckets(map); + + /* we raced with a resize; acquire ht->lock to see the updated ht->map */ + qemu_mutex_lock(&ht->lock); + map = ht->map; + qht_map_lock_buckets(map); + qemu_mutex_unlock(&ht->lock); + *pmap = map; + return; +} + +/* + * Get a head bucket and lock it, making sure its parent map is not stale. + * @pmap is filled with a pointer to the bucket's parent map. + * + * Unlock with qemu_spin_unlock(&b->lock). + * + * Note: callers cannot have ht->lock held. 
+ */ +static inline +struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash, + struct qht_map **pmap) +{ + struct qht_bucket *b; + struct qht_map *map; + + map = atomic_rcu_read(&ht->map); + b = qht_map_to_bucket(map, hash); + + qemu_spin_lock(&b->lock); + if (likely(!qht_map_is_stale__locked(ht, map))) { + *pmap = map; + return b; + } + qemu_spin_unlock(&b->lock); + + /* we raced with a resize; acquire ht->lock to see the updated ht->map */ + qemu_mutex_lock(&ht->lock); + map = ht->map; + b = qht_map_to_bucket(map, hash); + qemu_spin_lock(&b->lock); + qemu_mutex_unlock(&ht->lock); + *pmap = map; + return b; +} + +static inline bool qht_map_needs_resize(struct qht_map *map) +{ + return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold; +} + +static inline void qht_chain_destroy(struct qht_bucket *head) +{ + struct qht_bucket *curr = head->next; + struct qht_bucket *prev; + + while (curr) { + prev = curr; + curr = curr->next; + qemu_vfree(prev); + } +} + +/* pass only an orphan map */ +static void qht_map_destroy(struct qht_map *map) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + qht_chain_destroy(&map->buckets[i]); + } + qemu_vfree(map->buckets); + g_free(map); +} + +static struct qht_map *qht_map_create(size_t n_buckets) +{ + struct qht_map *map; + size_t i; + + map = g_malloc(sizeof(*map)); + map->n_buckets = n_buckets; + + map->n_added_buckets = 0; + map->n_added_buckets_threshold = n_buckets / + QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV; + + /* let tiny hash tables to at least add one non-head bucket */ + if (unlikely(map->n_added_buckets_threshold == 0)) { + map->n_added_buckets_threshold = 1; + } + + map->buckets = qemu_memalign(QHT_BUCKET_ALIGN, + sizeof(*map->buckets) * n_buckets); + for (i = 0; i < n_buckets; i++) { + qht_head_init(&map->buckets[i]); + } + return map; +} + +void qht_init(struct qht *ht, size_t n_elems, unsigned int mode) +{ + struct qht_map *map; + size_t n_buckets = qht_elems_to_buckets(n_elems); + + ht->mode = mode; + qemu_mutex_init(&ht->lock); + map = qht_map_create(n_buckets); + atomic_rcu_set(&ht->map, map); +} + +/* call only when there are no readers/writers left */ +void qht_destroy(struct qht *ht) +{ + qht_map_destroy(ht->map); + memset(ht, 0, sizeof(*ht)); +} + +static void qht_bucket_reset__locked(struct qht_bucket *head) +{ + struct qht_bucket *b = head; + int i; + + seqlock_write_begin(&head->sequence); + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i] == NULL) { + goto done; + } + b->hashes[i] = 0; + atomic_set(&b->pointers[i], NULL); + } + b = b->next; + } while (b); + done: + seqlock_write_end(&head->sequence); +} + +/* call with all bucket locks held */ +static void qht_map_reset__all_locked(struct qht_map *map) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + qht_bucket_reset__locked(&map->buckets[i]); + } + qht_map_debug__all_locked(map); +} + +void qht_reset(struct qht *ht) +{ + struct qht_map *map; + + qht_map_lock_buckets__no_stale(ht, &map); + qht_map_reset__all_locked(map); + qht_map_unlock_buckets(map); +} + +bool qht_reset_size(struct qht *ht, size_t n_elems) +{ + struct qht_map *new; + struct qht_map *map; + size_t n_buckets; + bool resize = false; + + n_buckets = qht_elems_to_buckets(n_elems); + + qemu_mutex_lock(&ht->lock); + map = ht->map; + if (n_buckets != map->n_buckets) { + new = qht_map_create(n_buckets); + resize = true; + } + + qht_map_lock_buckets(map); + qht_map_reset__all_locked(map); + if (resize) { + qht_do_resize(ht, new); + } + 
qht_map_unlock_buckets(map); + qemu_mutex_unlock(&ht->lock); + + return resize; +} + +static inline +void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func, + const void *userp, uint32_t hash) +{ + struct qht_bucket *b = head; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->hashes[i] == hash) { + void *p = atomic_read(&b->pointers[i]); + + if (likely(p) && likely(func(p, userp))) { + return p; + } + } + } + b = atomic_rcu_read(&b->next); + } while (b); + + return NULL; +} + +static __attribute__((noinline)) +void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func, + const void *userp, uint32_t hash) +{ + unsigned int version; + void *ret; + + do { + version = seqlock_read_begin(&b->sequence); + ret = qht_do_lookup(b, func, userp, hash); + } while (seqlock_read_retry(&b->sequence, version)); + return ret; +} + +void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp, + uint32_t hash) +{ + struct qht_bucket *b; + struct qht_map *map; + unsigned int version; + void *ret; + + map = atomic_rcu_read(&ht->map); + b = qht_map_to_bucket(map, hash); + + version = seqlock_read_begin(&b->sequence); + ret = qht_do_lookup(b, func, userp, hash); + if (likely(!seqlock_read_retry(&b->sequence, version))) { + return ret; + } + /* + * Removing the do/while from the fastpath gives a 4% perf. increase when + * running a 100%-lookup microbenchmark. + */ + return qht_lookup__slowpath(b, func, userp, hash); +} + +/* call with head->lock held */ +static bool qht_insert__locked(struct qht *ht, struct qht_map *map, + struct qht_bucket *head, void *p, uint32_t hash, + bool *needs_resize) +{ + struct qht_bucket *b = head; + struct qht_bucket *prev = NULL; + struct qht_bucket *new = NULL; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i]) { + if (unlikely(b->pointers[i] == p)) { + return false; + } + } else { + goto found; + } + } + prev = b; + b = b->next; + } while (b); + + b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b)); + memset(b, 0, sizeof(*b)); + new = b; + i = 0; + atomic_inc(&map->n_added_buckets); + if (unlikely(qht_map_needs_resize(map)) && needs_resize) { + *needs_resize = true; + } + + found: + /* found an empty key: acquire the seqlock and write */ + seqlock_write_begin(&head->sequence); + if (new) { + atomic_rcu_set(&prev->next, b); + } + b->hashes[i] = hash; + atomic_set(&b->pointers[i], p); + seqlock_write_end(&head->sequence); + return true; +} + +static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht) +{ + struct qht_map *map; + + /* + * If the lock is taken it probably means there's an ongoing resize, + * so bail out. 
+ */ + if (qemu_mutex_trylock(&ht->lock)) { + return; + } + map = ht->map; + /* another thread might have just performed the resize we were after */ + if (qht_map_needs_resize(map)) { + struct qht_map *new = qht_map_create(map->n_buckets * 2); + + qht_map_lock_buckets(map); + qht_do_resize(ht, new); + qht_map_unlock_buckets(map); + } + qemu_mutex_unlock(&ht->lock); +} + +bool qht_insert(struct qht *ht, void *p, uint32_t hash) +{ + struct qht_bucket *b; + struct qht_map *map; + bool needs_resize = false; + bool ret; + + /* NULL pointers are not supported */ + qht_debug_assert(p); + + b = qht_bucket_lock__no_stale(ht, hash, &map); + ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize); + qht_bucket_debug__locked(b); + qemu_spin_unlock(&b->lock); + + if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) { + qht_grow_maybe(ht); + } + return ret; +} + +static inline bool qht_entry_is_last(struct qht_bucket *b, int pos) +{ + if (pos == QHT_BUCKET_ENTRIES - 1) { + if (b->next == NULL) { + return true; + } + return b->next->pointers[0] == NULL; + } + return b->pointers[pos + 1] == NULL; +} + +static void +qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j) +{ + qht_debug_assert(!(to == from && i == j)); + qht_debug_assert(to->pointers[i]); + qht_debug_assert(from->pointers[j]); + + to->hashes[i] = from->hashes[j]; + atomic_set(&to->pointers[i], from->pointers[j]); + + from->hashes[j] = 0; + atomic_set(&from->pointers[j], NULL); +} + +/* + * Find the last valid entry in @head, and swap it with @orig[pos], which has + * just been invalidated. + */ +static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos) +{ + struct qht_bucket *b = orig; + struct qht_bucket *prev = NULL; + int i; + + if (qht_entry_is_last(orig, pos)) { + orig->hashes[pos] = 0; + atomic_set(&orig->pointers[pos], NULL); + return; + } + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i]) { + continue; + } + if (i > 0) { + return qht_entry_move(orig, pos, b, i - 1); + } + qht_debug_assert(prev); + return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1); + } + prev = b; + b = b->next; + } while (b); + /* no free entries other than orig[pos], so swap it with the last one */ + qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1); +} + +/* call with b->lock held */ +static inline +bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head, + const void *p, uint32_t hash) +{ + struct qht_bucket *b = head; + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + void *q = b->pointers[i]; + + if (unlikely(q == NULL)) { + return false; + } + if (q == p) { + qht_debug_assert(b->hashes[i] == hash); + seqlock_write_begin(&head->sequence); + qht_bucket_remove_entry(b, i); + seqlock_write_end(&head->sequence); + return true; + } + } + b = b->next; + } while (b); + return false; +} + +bool qht_remove(struct qht *ht, const void *p, uint32_t hash) +{ + struct qht_bucket *b; + struct qht_map *map; + bool ret; + + /* NULL pointers are not supported */ + qht_debug_assert(p); + + b = qht_bucket_lock__no_stale(ht, hash, &map); + ret = qht_remove__locked(map, b, p, hash); + qht_bucket_debug__locked(b); + qemu_spin_unlock(&b->lock); + return ret; +} + +static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b, + qht_iter_func_t func, void *userp) +{ + int i; + + do { + for (i = 0; i < QHT_BUCKET_ENTRIES; i++) { + if (b->pointers[i] == NULL) { + return; + } + func(ht, b->pointers[i], b->hashes[i], userp); + } + b = b->next; + } while (b); 
+} + +/* call with all of the map's locks held */ +static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map, + qht_iter_func_t func, void *userp) +{ + size_t i; + + for (i = 0; i < map->n_buckets; i++) { + qht_bucket_iter(ht, &map->buckets[i], func, userp); + } +} + +void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp) +{ + struct qht_map *map; + + map = atomic_rcu_read(&ht->map); + qht_map_lock_buckets(map); + /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */ + qht_map_iter__all_locked(ht, map, func, userp); + qht_map_unlock_buckets(map); +} + +static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp) +{ + struct qht_map *new = userp; + struct qht_bucket *b = qht_map_to_bucket(new, hash); + + /* no need to acquire b->lock because no thread has seen this map yet */ + qht_insert__locked(ht, new, b, p, hash, NULL); +} + +/* + * Call with ht->lock and all bucket locks held. + * + * Creating the @new map here would add unnecessary delay while all the locks + * are held--holding up the bucket locks is particularly bad, since no writes + * can occur while these are held. Thus, we let callers create the new map, + * hopefully without the bucket locks held. + */ +static void qht_do_resize(struct qht *ht, struct qht_map *new) +{ + struct qht_map *old; + + old = ht->map; + g_assert_cmpuint(new->n_buckets, !=, old->n_buckets); + + qht_map_iter__all_locked(ht, old, qht_map_copy, new); + qht_map_debug__all_locked(new); + + atomic_rcu_set(&ht->map, new); + call_rcu(old, qht_map_destroy, rcu); +} + +bool qht_resize(struct qht *ht, size_t n_elems) +{ + size_t n_buckets = qht_elems_to_buckets(n_elems); + size_t ret = false; + + qemu_mutex_lock(&ht->lock); + if (n_buckets != ht->map->n_buckets) { + struct qht_map *new; + struct qht_map *old = ht->map; + + new = qht_map_create(n_buckets); + qht_map_lock_buckets(old); + qht_do_resize(ht, new); + qht_map_unlock_buckets(old); + ret = true; + } + qemu_mutex_unlock(&ht->lock); + + return ret; +} + +/* pass @stats to qht_statistics_destroy() when done */ +void qht_statistics_init(struct qht *ht, struct qht_stats *stats) +{ + struct qht_map *map; + int i; + + map = atomic_rcu_read(&ht->map); + + stats->head_buckets = map->n_buckets; + stats->used_head_buckets = 0; + stats->entries = 0; + qdist_init(&stats->chain); + qdist_init(&stats->occupancy); + + for (i = 0; i < map->n_buckets; i++) { + struct qht_bucket *head = &map->buckets[i]; + struct qht_bucket *b; + unsigned int version; + size_t buckets; + size_t entries; + int j; + + do { + version = seqlock_read_begin(&head->sequence); + buckets = 0; + entries = 0; + b = head; + do { + for (j = 0; j < QHT_BUCKET_ENTRIES; j++) { + if (atomic_read(&b->pointers[j]) == NULL) { + break; + } + entries++; + } + buckets++; + b = atomic_rcu_read(&b->next); + } while (b); + } while (seqlock_read_retry(&head->sequence, version)); + + if (entries) { + qdist_inc(&stats->chain, buckets); + qdist_inc(&stats->occupancy, + (double)entries / QHT_BUCKET_ENTRIES / buckets); + stats->used_head_buckets++; + stats->entries += entries; + } else { + qdist_inc(&stats->occupancy, 0); + } + } +} + +void qht_statistics_destroy(struct qht_stats *stats) +{ + qdist_destroy(&stats->occupancy); + qdist_destroy(&stats->chain); +} -- cgit v1.2.1
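
For reference, below is a minimal, self-contained usage sketch of the two modules added by this series. It is not part of the patches. The caller-side struct, the toy hash function, the use of printf/assert, and the "qemu/osdep.h" include are illustrative assumptions; the lookup callback signature is inferred from how qht_do_lookup() invokes func(p, userp), and the exact typedefs live in qemu/qht.h and qemu/qdist.h, which are not shown in this diff.

    #include "qemu/osdep.h"   /* assumed: usual QEMU convention, pulls in glib/stdio/assert */
    #include "qemu/qdist.h"
    #include "qemu/qht.h"

    /* hypothetical caller-side object stored in the table */
    struct entry {
        uint32_t key;
        int payload;
    };

    /* toy hash for illustration only; any decent 32-bit mix would do */
    static uint32_t toy_hash(uint32_t key)
    {
        return key * 2654435761u;
    }

    /*
     * Lookup callback. Signature assumed from qht_do_lookup(), which calls
     * func(p, userp) and treats a non-zero return as a match.
     */
    static bool entry_cmp(const void *obj, const void *userp)
    {
        const struct entry *e = obj;
        const uint32_t *key = userp;

        return e->key == *key;
    }

    static void example(void)
    {
        struct qht ht;
        struct qht_stats stats;
        struct qdist latencies;
        struct entry e = { .key = 42, .payload = 1 };
        uint32_t key = 42;
        struct entry *found;
        char *hist;

        /* size for ~1024 entries; let the table grow on its own if needed */
        qht_init(&ht, 1024, QHT_MODE_AUTO_RESIZE);

        qht_insert(&ht, &e, toy_hash(e.key));
        found = qht_lookup(&ht, entry_cmp, &key, toy_hash(key));
        assert(found == &e);

        /*
         * Per the header comment, a removed object may only be freed after an
         * RCU grace period; not an issue here since 'e' lives on the stack.
         */
        qht_remove(&ht, &e, toy_hash(e.key));

        /* per-bucket occupancy histogram, printed with percentage labels */
        qht_statistics_init(&ht, &stats);
        hist = qdist_pr(&stats.occupancy, 10,
                        QDIST_PR_LABELS | QDIST_PR_PERCENT | QDIST_PR_100X);
        printf("occupancy: %s\n", hist ? hist : "(empty)");
        g_free(hist);
        qht_statistics_destroy(&stats);

        /* a stand-alone qdist works the same way as in the first commit */
        qdist_init(&latencies);
        qdist_inc(&latencies, 39);
        qdist_inc(&latencies, 80);
        printf("avg: %f\n", qdist_avg(&latencies));
        qdist_destroy(&latencies);

        qht_destroy(&ht);
    }

Note that qht_lookup() takes the comparison callback per call rather than per table in this version, and that qdist_pr()/qdist_pr_plain() return NULL for an empty distribution, so callers should be prepared for that before passing the string to printf.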