author      Harvey Harrison <harvey.harrison@gmail.com>        2008-05-01 04:35:17 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>     2008-05-01 08:04:02 -0700
commit      24c03d47d0481ed7b172b398f6c9b7ca1fafb9fa (patch)
tree        d24b94e09b12a2c16cc0cf49c273af846fcc6f13 /block
parent      e37d05dad7ff9744efd8ea95a70d389e9a65a6fc (diff)
download    linux-3.10-24c03d47d0481ed7b172b398f6c9b7ca1fafb9fa.tar.gz
            linux-3.10-24c03d47d0481ed7b172b398f6c9b7ca1fafb9fa.tar.bz2
            linux-3.10-24c03d47d0481ed7b172b398f6c9b7ca1fafb9fa.zip
block: remove remaining __FUNCTION__ occurrences
__FUNCTION__ is gcc-specific; use the C99-standard __func__ instead.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
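
For context, __func__ is the predefined identifier standardized in C99, while __FUNCTION__ is an older gcc-specific spelling of the same thing; both expand to the name of the enclosing function, so the substitution changes nothing at runtime and only removes the reliance on a compiler extension. A minimal userspace sketch, with fprintf() standing in for printk() and check_depth() an invented function rather than code from this patch:

```c
#include <stdio.h>

/* check_depth() is a made-up example; it is not part of the patch. */
static int check_depth(int depth)
{
	if (depth <= 0) {
		/* Before the patch this would have used the gcc-only
		 * __FUNCTION__; __func__ behaves identically but is
		 * standard C99, so non-gcc compilers accept it too. */
		fprintf(stderr, "%s: adjusted depth to %d\n", __func__, 1);
		return 1;
	}
	return depth;
}

int main(void)
{
	/* Prints "check_depth: adjusted depth to 1" to stderr. */
	return check_depth(0) == 1 ? 0 : 1;
}
```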
Diffstat (limited to 'block')
-rw-r--r--   block/blk-barrier.c   |  3
-rw-r--r--   block/blk-core.c      |  5
-rw-r--r--   block/blk-settings.c  | 20
-rw-r--r--   block/blk-tag.c       |  8
-rw-r--r--   block/bsg.c           |  2
-rw-r--r--   block/elevator.c      |  5
6 files changed, 20 insertions, 23 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 66e55288178..a09ead19f9c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n",
-				__FUNCTION__);
+		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
diff --git a/block/blk-core.c b/block/blk-core.c
index 5d09f8c5602..b754a4a2f9b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		if (unlikely(nbytes > bio->bi_size)) {
 			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-			       __FUNCTION__, nbytes, bio->bi_size);
+			       __func__, nbytes, bio->bi_size);
 			nbytes = bio->bi_size;
 		}
@@ -1566,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
 		if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 			blk_dump_rq_flags(req, "__end_that");
 			printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-			       __FUNCTION__, bio->bi_idx,
-			       bio->bi_vcnt);
+			       __func__, bio->bi_idx, bio->bi_vcnt);
 			break;
 		}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 6089384ab06..bb93d4c3277 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -168,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_sectors);
 	}
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -196,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 	q->max_phys_segments = max_segments;
@@ -220,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_segments);
 	}
 	q->max_hw_segments = max_segments;
@@ -241,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-		       max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n",
+		       __func__, max_size);
 	}
 	q->max_segment_size = max_size;
@@ -357,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-		       mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n",
+		       __func__, mask);
 	}
 	q->seg_boundary_mask = mask;
diff --git a/block/blk-tag.c b/block/blk-tag.c
index e176ddbe599..de64e042997 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
 	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __FUNCTION__, depth);
+		       __func__, depth);
 	}
 	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 	bqt->tag_index[tag] = NULL;
 	if (unlikely(!test_bit(tag, bqt->tag_map))) {
 		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __FUNCTION__, tag);
+		       __func__, tag);
 		return;
 	}
 	/*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR "%s: request %p for device [%s] already tagged %d",
-		       __FUNCTION__, rq,
+		       __func__, rq,
 		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
 		BUG();
 	}
diff --git a/block/bsg.c b/block/bsg.c
index 23ea4fd1a66..fa796b605f5 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
diff --git a/block/elevator.c b/block/elevator.c
index ac5310ef827..980f8ae147b 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -650,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
-		       __FUNCTION__, where);
+		       __func__, where);
 		BUG();
 	}
@@ -808,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
 			rq->cmd_flags |= REQ_QUIET;
 			end_queued_request(rq, 0);
 		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-			       ret);
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
 		}
 	}
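
The bsg.c hunk above changes a debug macro rather than a direct printk() call. A self-contained sketch of that pattern, with printf() standing in for printk() and probe_device() invented purely for illustration, shows how the macro prefixes every message with the caller's name via __func__; note that the real bsg.c leaves BSG_DEBUG undefined by default, so its dprintk() normally compiles away.

```c
#include <stdio.h>

#define BSG_DEBUG	/* defined here so the example actually prints */

#ifdef BSG_DEBUG
/* GNU-style named variadic macro, mirroring block/bsg.c: the ", ##args"
 * extension drops the trailing comma when no extra arguments are given. */
#define dprintk(fmt, args...) printf("%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

/* probe_device() is a hypothetical caller, for illustration only. */
static void probe_device(int id)
{
	dprintk("probing device %d\n", id);	/* "probe_device: probing device 3" */
}

int main(void)
{
	probe_device(3);
	return 0;
}
```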