Linux Block Device I/O Subsystem (Part 1) (5)

struct bio
--47-->bi_next, the pointer to the next bio in the list
--50-->bi_rw: the low bits give the direction (READ/WRITE), the high bits the priority
--90-->the number of bio_vec objects this bio contains
--91-->the maximum number of bio_vecs this bio can hold
--95-->the first bio_vec described by this bio
--104-->the array of bio_vec structures this bio contains; each entry maps one "segment" of memory inside some page
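For reference, here is an abridged sketch of the struct these annotations refer to, based on the 3.14 include/linux/blk_types.h (only the annotated fields are shown, plus bi_iter which the example code below relies on; the line numbers are the ones used above):

struct bio {
 47         struct bio              *bi_next;       /* request queue link */
            ...
 50         unsigned long           bi_rw;          /* bottom bits READ/WRITE,
                                                       top bits priority */
            ...
            struct bvec_iter        bi_iter;        /* current position within the bio */
            ...
 90         unsigned short          bi_vcnt;        /* how many bio_vec's */
 91         unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
            ...
 95         struct bio_vec          *bi_io_vec;     /* the actual vec list */
            ...
104         struct bio_vec          bi_inline_vecs[0];
};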

bio_vec

A bio_vec describes a contiguous region inside a given page; within a bio it represents one "segment" of a page.

25 struct bio_vec {
26         struct page     *bv_page;
27         unsigned int    bv_len;
28         unsigned int    bv_offset;
29 };

struct bio_vec
--26-->bv_page, the page being described
--27-->bv_len, the length of the region in bytes
--28-->bv_offset, the offset of the region's start within the page
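A bio_vec alone is enough to locate a segment's data in memory: map the page and add the offset. A minimal sketch (the helper name segment_buffer is ours, not a kernel API):

#include <linux/bio.h>
#include <linux/highmem.h>

/* Map the segment described by bvec and return its kernel virtual address;
 * the caller must kunmap_atomic() the returned pointer when done. */
static char *segment_buffer(struct bio_vec *bvec)
{
        /* bvec->bv_len bytes of segment data start at this address */
        return (char *)kmap_atomic(bvec->bv_page) + bvec->bv_offset;
}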

bvec_iter

Records how far the current bvec has been processed; it is used to iterate over a bio.

31 struct bvec_iter {
32         sector_t        bi_sector;      /* device address in 512 byte
33                                            sectors */
34         unsigned int    bi_size;        /* residual I/O count */
35
36         unsigned int    bi_idx;         /* current index into bvl_vec */
37
38         unsigned int    bi_bvec_done;   /* number of bytes completed in
39                                            current bvec */
40 };
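bio_for_each_segment() below normally drives this iterator, but the 3.14 helpers bio_iter_iovec() and bio_advance_iter() also let us walk it by hand, which makes the role of the fields concrete. A sketch (walk_bio is our name):

#include <linux/bio.h>

/* Walk a bio segment by segment; a private copy of the iterator is
 * advanced so the bio's own bi_iter is left untouched. */
static void walk_bio(struct bio *bio)
{
        struct bvec_iter iter = bio->bi_iter;
        struct bio_vec bvec;

        while (iter.bi_size) {                          /* residual bytes remain */
                bvec = bio_iter_iovec(bio, iter);       /* current segment */
                /* ... process bvec.bv_len bytes here ... */
                bio_advance_iter(bio, &iter, bvec.bv_len);
        }
}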

__rq_for_each_bio()

Iterates over every bio in a request.

738 #define __rq_for_each_bio(_bio, rq)     \
739         if ((rq->bio))                  \
740                 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
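A minimal usage sketch: counting the bios chained in a request (count_bios is our name):

#include <linux/blkdev.h>

/* Count the bios that make up a request by following bi_next. */
static unsigned int count_bios(struct request *rq)
{
        struct bio *bio;
        unsigned int n = 0;

        __rq_for_each_bio(bio, rq)
                n++;
        return n;
}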

bio_for_each_segment()

Iterates over every segment (bio_vec) in a bio.

242 #define bio_for_each_segment(bvl, bio, iter)                    \
243         __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
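For reference, the __bio_for_each_segment() it delegates to is, in 3.14, roughly the loop we wrote by hand above:

#define __bio_for_each_segment(bvl, bio, iter, start)                   \
        for (iter = (start);                                            \
             (iter).bi_size &&                                          \
                ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
             bio_advance_iter((bio), &(iter), (bvl).bv_len))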

rq_for_each_segment()

Iterates over every segment in a request.

742 #define rq_for_each_segment(bvl, _rq, _iter)                    \
743         __rq_for_each_bio(_iter.bio, _rq)                       \
744                 bio_for_each_segment(bvl, _iter.bio, _iter.iter)
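The sbull example below iterates in two explicit levels (bios, then segments); rq_for_each_segment() collapses both into a single loop. A sketch of what sbull_xfer_request() could look like with it, reusing the sbull_transfer() helper from the listing (the function name sbull_xfer_request_flat is ours):

#include <linux/blkdev.h>
#include <linux/highmem.h>

/* One flat loop over every segment of a request; the macro advances
 * both iter.bio and iter.iter internally. */
static int sbull_xfer_request_flat(struct sbull_dev *dev, struct request *req)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        int nsect = 0;

        rq_for_each_segment(bvec, req, iter) {
                char *buffer = (char *)kmap_atomic(bvec.bv_page) + bvec.bv_offset;

                sbull_transfer(dev, iter.iter.bi_sector, bvec.bv_len >> 9,
                               buffer, bio_data_dir(iter.bio) == WRITE);
                kunmap_atomic(buffer);
                nsect += bvec.bv_len >> 9;
        }
        return nsect;
}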

Summary

Whatever request function we bind to a request_queue, an essential part of its job is to pull the queued data back out, so iteration is unavoidable. For a device that goes through an I/O scheduler, we must fetch whole requests from the queue and then work through each one; for a device without an I/O scheduler, we can take each bio directly from the queue. The two kinds of handler therefore have different interfaces. The example below is trimmed from the LDD3 sbull code and ported to the 3.14 API; it shows how the two modes differ in use.

sbull_init
        └── setup_device
                ├──sbull_make_request
                │        ├──sbull_xfer_bio
                │        └──sbull_transfer
                └──sbull_full_request
                        ├──blk_fetch_request
                        └──sbull_xfer_request
                                ├── __rq_for_each_bio
                                └── sbull_xfer_bio
                                        └──sbull_transfer

/*
 * Handle an I/O request.
 * Implements the actual sector read/write.
 */
static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
                           unsigned long nsect, char *buffer, int write)
{
        unsigned long offset = sector*KERNEL_SECTOR_SIZE;
        unsigned long nbytes = nsect*KERNEL_SECTOR_SIZE;

        if (write)
                memcpy(dev->data + offset, buffer, nbytes);
        else
                memcpy(buffer, dev->data + offset, nbytes);
}

/*
 * Transfer a single BIO.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
        struct bvec_iter i;     /* used to iterate over the bio_vec objects */
        struct bio_vec bvec;
        sector_t sector = bio->bi_iter.bi_sector;

        /* Do each segment independently. */
        bio_for_each_segment(bvec, bio, i) {    /* bvec visits every bio_vec in the bio */
                /* in 3.14, __bio_kmap_atomic() takes the iterator; the KM_* argument is gone */
                char *buffer = __bio_kmap_atomic(bio, i);

                /* bvec.bv_len is the current segment's length; bio_cur_bytes(bio)
                 * would read bio->bi_iter, which this loop does not advance */
                sbull_transfer(dev, sector, bvec.bv_len >> 9, buffer,
                               bio_data_dir(bio) == WRITE);
                sector += bvec.bv_len >> 9;
                __bio_kunmap_atomic(buffer);
        }
        return 0;       /* Always "succeed" */
}

/*
 * Transfer a full request.
 */
static int sbull_xfer_request(struct sbull_dev *dev, struct request *req)
{
        struct bio *bio;
        int nsect = 0;

        __rq_for_each_bio(bio, req) {
                sbull_xfer_bio(dev, bio);
                nsect += bio->bi_iter.bi_size/KERNEL_SECTOR_SIZE;  /* bi_size lives in bi_iter since 3.14 */
        }
        return nsect;
}

/*
 * Smarter request function that "handles clustering".
 */
static void sbull_full_request(struct request_queue *q)
{
        struct request *req;
        int nsect;
        struct sbull_dev *dev;
        int i = 0;

        while ((req = blk_fetch_request(q)) != NULL) {
                dev = req->rq_disk->private_data;
                nsect = sbull_xfer_request(dev, req);
                __blk_end_request(req, 0, nsect << 9);
                printk("i = %d\n", ++i);
        }
}

/* The direct make_request version */
static void sbull_make_request(struct request_queue *q, struct bio *bio)
{
        struct sbull_dev *dev = q->queuedata;
        int status;

        status = sbull_xfer_bio(dev, bio);
        bio_endio(bio, status);
}

/*
 * The device operations structure.
 */
static struct block_device_operations sbull_ops = {
        .owner   = THIS_MODULE,
        .open    = sbull_open,
        .release = sbull_release,
        .getgeo  = sbull_getgeo,
};

/*
 * Set up our internal device.
 */
static void setup_device(struct sbull_dev *dev, int which)
{
        /*
         * Get some memory.
         */
        memset(dev, 0, sizeof(struct sbull_dev));
        dev->size = nsectors * hardsect_size;
        dev->data = vmalloc(dev->size);
        spin_lock_init(&dev->lock);     /* blk_init_queue() below needs an initialized lock */

        /*
         * The I/O queue, depending on whether we are using our own
         * make_request function or not.
         */
        switch (request_mode) {
        case RM_NOQUEUE:
                dev->queue = blk_alloc_queue(GFP_KERNEL);
                blk_queue_make_request(dev->queue, sbull_make_request);
                break;
        case RM_FULL:
                dev->queue = blk_init_queue(sbull_full_request, &dev->lock);
                break;
        }
        dev->queue->queuedata = dev;

        /*
         * And the gendisk structure.
         */
        dev->gd = alloc_disk(SBULL_MINORS);
        dev->gd->major = sbull_major;
        dev->gd->first_minor = which*SBULL_MINORS;
        dev->gd->fops = &sbull_ops;
        dev->gd->queue = dev->queue;
        dev->gd->private_data = dev;
        snprintf(dev->gd->disk_name, 32, "sbull%c", which + 'a');
        set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
        add_disk(dev->gd);
}

static int __init sbull_init(void)
{
        int i;
        /*
         * Get registered.
         */
        sbull_major = register_blkdev(sbull_major, "sbull");
        /*
         * Allocate the device array, and initialize each one.
         */
        Devices = kmalloc(ndevices*sizeof(struct sbull_dev), GFP_KERNEL);
        for (i = 0; i < ndevices; i++)
                setup_device(Devices + i, i);
        return 0;
}
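The listing is trimmed, so the module boilerplate it relies on is missing. A minimal sketch of those pieces under our own assumptions (the parameter defaults and the two request_mode values are ours; sbull_open/sbull_release/sbull_getgeo stay omitted, and the declarations would precede the listing):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>

#define KERNEL_SECTOR_SIZE 512
#define SBULL_MINORS 16

enum { RM_NOQUEUE = 0, RM_FULL = 1 };   /* bio mode vs. full request mode */

static int request_mode = RM_FULL;
module_param(request_mode, int, 0);
static int sbull_major;
module_param(sbull_major, int, 0);
static int hardsect_size = 512;
module_param(hardsect_size, int, 0);
static int nsectors = 1024;             /* device length in sectors */
module_param(nsectors, int, 0);
static int ndevices = 4;
module_param(ndevices, int, 0);

struct sbull_dev {
        int size;                       /* device size in bytes */
        u8 *data;                       /* the data array */
        spinlock_t lock;                /* protects the request queue */
        struct request_queue *queue;
        struct gendisk *gd;
};

static struct sbull_dev *Devices;

static void __exit sbull_exit(void)
{
        int i;

        for (i = 0; i < ndevices; i++) {
                struct sbull_dev *dev = Devices + i;

                if (dev->gd) {
                        del_gendisk(dev->gd);
                        put_disk(dev->gd);
                }
                if (dev->queue)
                        blk_cleanup_queue(dev->queue);
                vfree(dev->data);
        }
        unregister_blkdev(sbull_major, "sbull");
        kfree(Devices);
}

module_init(sbull_init);
module_exit(sbull_exit);
MODULE_LICENSE("GPL");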
