/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}
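
/*
 * Prio aging: once roughly 1/1024th of the cache's capacity (in sectors)
 * has been written since the last rescale, decrement the prio of every
 * unpinned data bucket (leaving btree buckets at BTREE_PRIO alone) and
 * recompute c->min_prio. The cmpxchg loop ensures that when c->rescale
 * goes negative, exactly one of the racing writers does the walk.
 */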
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}
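
/*
 * Invalidating a bucket in memory: bumping the gen makes any existing
 * pointers into the bucket stale, the prio is reset, and a pin is taken
 * so bch_can_invalidate_bucket() won't pick the bucket again while it
 * sits on the freelists waiting for its new gen to reach disk.
 */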
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale the
 * bucket priorities into a range comparable with the sector counts.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
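
/*
 * One pass over all buckets: collect the best candidates (smallest
 * bucket_prio()) in a bounded heap, keyed so the worst kept candidate
 * sits at the root and is replaced whenever a better one turns up; then
 * re-heapify with bucket_min_cmp and pop candidates cheapest-first until
 * free_inc is full.
 */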
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}
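
/*
 * FIFO replacement: sweep buckets in order, resuming from wherever the
 * last sweep stopped, and bail out to garbage collection once we've
 * checked every bucket without managing to fill free_inc.
 */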
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket < ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}
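
/*
 * Random replacement: probe buckets at random, giving up and asking for
 * garbage collection after half the bucket count's worth of probes fails
 * to fill free_inc.
 */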
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}
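
/*
 * Sleep until @cond is true, dropping bucket_lock while blocked. Note the
 * hidden control flow: if the kthread is told to stop, this returns 0
 * from the enclosing function, so it can only be used inside a thread
 * function returning int - here, bch_allocator_thread().
 */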
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

static int bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}
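
/*
 * Main allocator loop: drain free_inc onto the reserve freelists (issuing
 * discards first if enabled, with bucket_lock dropped around the blocking
 * discard), then invalidate more buckets and, on synchronous cache sets,
 * call bch_prio_write() so the new gens are durable before the buckets
 * can be handed out again.
 */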
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}

/* Allocation */
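
/*
 * Grab a bucket index from ca's freelists: try the unreserved pool and
 * then the caller's reserve without blocking; if both are empty and @wait
 * is set, sleep on bucket_wait until the allocator thread refills them,
 * otherwise fail with -1.
 */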
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;
	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}

/* Sector allocator */
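
/*
 * An open bucket tracks a bucket we're currently filling sequentially
 * with data: the key holds pointers to the next unwritten sectors,
 * sectors_free counts what's left, and last_write_point remembers which
 * write stream used it last.
 */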
struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you're starting Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache awhile, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, and sets k to point to the
 * newly allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point
 * to the end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free -= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the open bucket's refcount.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}