sbull 가상 디스크 드라이브 작성
sbull(Simple Block Utility for Loading Localities) 드라이버는 시스템 메모리를 저장소로 사용하는 블록 장치를 구현하며, 본질적으로 RAM 디스크 드라이버입니다. 문자 장치의 I/O 요청은 도착 즉시 처리되지만, 블록 장치의 I/O 요청은 큐에 쌓인 뒤 정렬·병합되어 처리됩니다. 이 큐잉과 병합 작업은 I/O 스케줄링 알고리즘이 담당하므로, 블록 장치 드라이버의 핵심 작업은 요청 처리 함수(request 함수) 또는 요청 생성 함수(make_request 함수)입니다. block_device_operations 구조체에는 읽기·쓰기 같은 멤버 함수가 없고 open, release, ioctl 등의 함수만 포함됩니다.
블록 장치 프로세스:
(1) 먼저 모듈 모형을 구축한다
MODULE_LICENSE("Dual BSD/GPL");
static struct block_device_operations sbull_ops = {
.owner = THIS_MODULE,
.open = sbull_open,
.release = sbull_release,
.media_changed = sbull_media_changed,
.revalidate_disk = sbull_revalidate,
.ioctl = sbull_ioctl,
.getgeo = sbull_getgeo,
};
module_init(sbull_init);
module_exit(sbull_exit);
(2) 메모리를 디스크로 가상화하는 블록 장치 구조체 struct sbull_dev 정의
struct sbull_dev {
int size; /* Device size in sectors */
u8 *data; /* The data array */
short users; /* How many users */
short media_change; /* Flag a media change? */
spinlock_t lock; /* For mutual exclusion */
struct request_queue *queue; /* The device request queue */
struct gendisk *gd; /* The gendisk structure */
struct timer_list timer; /* For simulated media changes */
};
이 설비 구조체는 우리 업무의 핵심이다. 어떤 구성원이 필요한지 모르든지 상관없다. 그 말을 작성할 때 설비가 그 성질과 기능을 표현해야 하기 때문에 그에 상응하는 첨가를 하면 OK이다.
(3) 장치 초기화
sbull_init( ){
1. sbull_major = register_blkdev(sbull_major, "sbull");
2. static struct sbull_dev *Devices = kmalloc(ndevices*sizeof(struct sbull_dev), GFP_KERNEL); // 여기서 kmalloc의 결과를 판단해서 추가 처리할 수 있습니다.
3. setup_device() — 장치 등록 함수로 이동합니다
}//초기화 함수 끝
(4) 우리의 이 설비를 등록하기 시작한다
setup_device () {//장치를 설치하기 전에 초기화되었는지 확인하십시오
이 구성원들은 모두 초기화를 해야 한다.
1. int size; /* Device size in sectors */
u8 *data; /* The data array */
short users; /* How many users */
spinlock_t lock; /* For mutual exclusion */
struct request_queue *queue; /* The device request queue */
struct gendisk *gd; /* The gendisk structure */
2. add_disk(dev->gd); // 가장 중요한 등록 함수 } // 함수 끝
(5) 섹터 크기, 데이터 그룹,user, 잠금 lock의 초기화:
1. dev->size = nsectors*hardsectsize;
dev->data = vmalloc(dev->size);
spin_lock_init(&dev->lock);
2.//dev->queue = blkalloc_queue(GFP_KERNEL); //RM_NOQUEUE
//dev->queue = blk_init_queue(sbull_full_request, &dev->lock); //RM_FULL
dev->queue = blk_init_queue(sbull_request, &dev->lock); //RM_SIMPLE
(6) 내장 하드웨어 섹터 크기 및 timer 아날로그 초기화 알림
blk_queue_logical_block_size(dev->queue, hardsect_size);
init_timer(&dev->timer);
dev->timer.data = (unsigned long) dev;
dev->timer.function = sbull_invalidate;
1. gendisk 초기화:
dev->gd = alloc_disk(SBULL_MINORS);
2. gendisk 구성원 초기화
dev->gd->major = sbull_major;
dev->gd->first_minor = which*SBULL_MINORS;
dev->gd->fops = &sbull_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
(7) gendisk 용량을 nsectors × (hardsect_size / KERNEL_SECTOR_SIZE) 섹터로 설정
set_capacity(dev->gd , nsectors*(hardsect_size / KERNEL_SECTOR_SIZE));
(8) 나머지는 block_device_operations 구조체에 명시된 인터페이스로, 다음과 같습니다:
.open = sbull_open,
.release = sbull_release,
.media_changed = sbull_media_changed,
.revalidate_disk = sbull_revalidate,
.ioctl = sbull_ioctl,
.getgeo = sbull_getgeo,
1. sbull_open(struct block_device *bdev, fmode_t mode)
2. sbull_release(struct gendisk *bd_disk, fmode_t mode)
3. sbull_media_changed(struct gendisk *gd)
4. sbull_revalidate(struct gendisk *gd)
5. sbull_invalidate(unsigned long ldev)
6. sbull_ioctl(struct block_device *bdev, fmode_t mode,
   unsigned int cmd, unsigned long arg)
7. sbull_getgeo(struct block_device *bdev, struct hd_geometry *geo)
다음은 코드입니다.
/*
* Sample disk driver for 2.6.35.
*/
//#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h> /* printk() */
#include <linux/slab.h> /* kmalloc() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
#include <linux/timer.h>
#include <linux/types.h> /* size_t */
#include <linux/fcntl.h> /* O_ACCMODE */
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/kdev_t.h>
#include <linux/vmalloc.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h> /* invalidate_bdev */
#include <linux/bio.h>
MODULE_LICENSE("Dual BSD/GPL");
/* Major number; 0 means "let the kernel assign one" (see sbull_init). */
static int sbull_major = 0;
module_param(sbull_major, int, 0);
/* Emulated hardware sector size, in bytes. */
static int hardsect_size = 512;
module_param(hardsect_size, int, 0);
static int nsectors = 25600; /* How big the drive is */
module_param(nsectors, int, 0);
/* How many sbull devices to create. */
static int ndevices = 1;
module_param(ndevices, int, 0);
/*
 * The different "request modes" we can use.
 */
enum {
	RM_SIMPLE  = 0,	/* The extra-simple request function */
	RM_FULL    = 1,	/* The full-blown version */
	RM_NOQUEUE = 2,	/* Use make_request */
};
//static int request_mode = RM_FULL;
//static int request_mode = RM_SIMPLE;
static int request_mode = RM_NOQUEUE;
module_param(request_mode, int, 0);
/*
 * Minor number and partition management.
 */
#define SBULL_MINORS	16
#define MINOR_SHIFT	4
/* Fixed: the original definition was missing the closing parenthesis. */
#define DEVNUM(kdevnum)	(MINOR(kdev_t_to_nr(kdevnum)) >> MINOR_SHIFT)
/*
 * We can tweak our hardware sector size, but the kernel talks to us
 * in terms of small sectors, always.
 */
#define KERNEL_SECTOR_SIZE	512
/*
 * After this much idle time, the driver will simulate a media change.
 */
#define INVALIDATE_DELAY	(60*HZ)
/*
 * The internal representation of our device.
 * One instance exists per simulated disk; all state lives here.
 */
struct sbull_dev {
int size; /* Device size in BYTES (nsectors*hardsect_size; see setup_device) */
u8 *data; /* The data array backing the RAM disk (vmalloc'd) */
short users; /* How many users currently hold the device open */
short media_change; /* Flag a media change? (set by the idle timer) */
spinlock_t lock; /* For mutual exclusion */
struct request_queue *queue; /* The device request queue */
struct gendisk *gd; /* The gendisk structure */
struct timer_list timer; /* For simulated media changes */
};
/* Array of ndevices devices, allocated in sbull_init(). */
static struct sbull_dev *Devices = NULL;
/*
* Handle an I/O request.
*/
static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
unsigned long nsect, char *buffer, int write)
{
unsigned long offset = sector*KERNEL_SECTOR_SIZE;
unsigned long nbytes = nsect*KERNEL_SECTOR_SIZE;
//printk("<0>""in %s offset=%d nbytes=%d write=%d
",__FUNCTION__,offset,nbytes,write);
//buffer[10]='\0';
//printk(buffer);
//printk("
");
if ((offset + nbytes) > dev->size) {
printk (KERN_NOTICE "Beyond-end write (%ld %ld)
", offset, nbytes);
return;
}
if (write)
memcpy(dev->data + offset, buffer, nbytes);
else
memcpy(buffer, dev->data + offset, nbytes);
}
/*The simple form of the request function.*/
static void sbull_request(struct request_queue *q)
{
struct request *req;
req = blk_fetch_request(q);
while (req != NULL) {
struct sbull_dev *dev = req->rq_disk->private_data;
if (! blk_fs_request(req)) {
printk (KERN_NOTICE "Skip non-fs request
");
__blk_end_request_all(req, -EIO);
continue;
}
// printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx
",
// dev - Devices, rq_data_dir(req),
// req->sector, req->current_nr_sectors,
// req->flags);
// printk("sectors=%d
",req->current_nr_sectors);
sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
req->buffer, rq_data_dir(req));
if ( ! __blk_end_request_cur(req, 0) ) {
req = NULL;
}
}
}
/*
 * Transfer a single BIO.
 *
 * Walks each segment of the bio, temporarily maps its page with an
 * atomic kmap, and copies the data to/from the RAM-disk backing store
 * via sbull_transfer().  Always returns 0.
 *
 * NOTE(review): this uses the 2.6-era bio API (bio->bi_sector,
 * int-indexed bio_for_each_segment, KM_USER0 kmap slots); it will not
 * build on modern kernels — confirm the 2.6.35 target.
 */
static int sbull_xfer_bio(struct sbull_dev *dev, struct bio *bio)
{
int i;
struct bio_vec *bvec;
sector_t sector = bio->bi_sector;	/* first sector covered by this bio */
/* Do each segment independently. */
bio_for_each_segment(bvec, bio, i) {
char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
/* bio_cur_bytes()>>9 converts the segment length to 512-byte sectors */
sbull_transfer(dev, sector, bio_cur_bytes(bio)>>9 ,
buffer, bio_data_dir(bio) == WRITE);
sector += bio_cur_bytes(bio)>>9;
__bio_kunmap_atomic(bio, KM_USER0);
}
return 0; /* Always "succeed" */
}
/*
 * Transfer a full request by handing every bio it contains to
 * sbull_xfer_bio().  Returns the number of 512-byte kernel sectors
 * transferred, for use with __blk_end_request().
 */
static int sbull_xfer_request(struct sbull_dev *dev, struct request *req)
{
	int total_sectors = 0;
	struct bio *b;

	__rq_for_each_bio(b, req) {
		sbull_xfer_bio(dev, b);
		total_sectors += b->bi_size / KERNEL_SECTOR_SIZE;
	}
	return total_sectors;
}
/*
* Smarter request function that "handles clustering".*/
static void sbull_full_request(struct request_queue *q)
{
struct request *req;
int nsect;
struct sbull_dev *dev ;
req = blk_fetch_request(q);
dev = req->rq_disk->private_data;
while (req != NULL) {
if (! blk_fs_request(req)) {
printk (KERN_NOTICE "Skip non-fs request
");
__blk_end_request_all(req, -EIO);
continue;
}
// printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx
",
// dev - Devices, rq_data_dir(req),
// req->sector, req->current_nr_sectors,
// req->flags);
// printk("sectors=%d
",req->current_nr_sectors);
nsect = sbull_xfer_request(dev, req);
__blk_end_request(req, 0, (nsect<<9));
// req = blk_fetch_request(q);
req = NULL;
}
}
//The direct make request version
/*
 * RM_NOQUEUE mode: bypass the request queue and I/O scheduler
 * entirely, servicing each bio as it arrives.  Returns 0 (bio
 * accepted and completed).
 */
static int sbull_make_request(struct request_queue *q, struct bio *bio)
{
struct sbull_dev *dev = q->queuedata;
int status;
status = sbull_xfer_bio(dev, bio);
//bio_endio(bio, bio->bi_size, status);
bio_endio(bio, status);	/* complete the bio with the transfer status */
return 0;
}
/*
 * Open: cancel any pending media-removal timer, run the media-change
 * machinery for the first opener, and count the user.
 */
static int sbull_open(struct block_device *bdev, fmode_t mode)
{
	struct sbull_dev *dev = bdev->bd_disk->private_data;

	/* Fixed: removed the remains of a commented-out debug printk
	 * whose closing `");` line was left uncommented — a syntax
	 * error in the original. */
	del_timer_sync(&dev->timer);
	spin_lock(&dev->lock);
	if (!dev->users)
		check_disk_change(bdev);
	dev->users++;
	spin_unlock(&dev->lock);
	return 0;
}
/*
 * Release: drop one user reference; when the last opener goes away,
 * arm the timer that will later simulate removal of the media.
 */
static int sbull_release(struct gendisk *bd_disk, fmode_t mode)
{
	struct sbull_dev *dev = bd_disk->private_data;

	spin_lock(&dev->lock);
	if (--dev->users == 0) {
		dev->timer.expires = jiffies + INVALIDATE_DELAY;
		add_timer(&dev->timer);
	}
	spin_unlock(&dev->lock);
	return 0;
}
/*
 * Report whether a (simulated) media change has occurred since the
 * flag was last cleared by sbull_revalidate().
 */
int sbull_media_changed(struct gendisk *gd)
{
	struct sbull_dev *sdev = gd->private_data;
	return sdev->media_change;
}
/*
 * Revalidate after a (simulated) media change: clear the flag and
 * wipe the backing store so the "new media" appears blank.
 * WE DO NOT TAKE THE LOCK HERE, for fear of deadlocking with open.
 * That needs to be reevaluated.
 */
int sbull_revalidate(struct gendisk *gd)
{
	struct sbull_dev *sdev = gd->private_data;

	if (!sdev->media_change)
		return 0;

	sdev->media_change = 0;
	memset(sdev->data, 0, sdev->size);
	return 0;
}
/*
* The "invalidate" function runs out of the device timer; it sets
* a flag to simulate the removal of the media.
*/
void sbull_invalidate(unsigned long ldev)
{
struct sbull_dev *dev = (struct sbull_dev *) ldev;
spin_lock(&dev->lock);
if (dev->users || !dev->data)
printk (KERN_WARNING "sbull: timer sanity check failed
");
else
dev->media_change = 1;
spin_unlock(&dev->lock);
}
/*
 * The ioctl() implementation.  No driver-specific commands are
 * supported; geometry is served through ->getgeo.  Return -ENOTTY
 * (the block-layer convention for "command not recognized") instead
 * of the original 0, which falsely reported success for every
 * unknown command.
 */
int sbull_ioctl (struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;	/* unknown command */
}
/*
 * Provide a fake CHS geometry for HDIO_GETGEO: 4 heads x 16 sectors
 * per track, with the cylinder count derived from the device size
 * (size / 64, i.e. heads*sectors).
 *
 * NOTE(review): pdev->size is in bytes (set as nsectors*hardsect_size
 * in setup_device), but the >>6 below treats it as a sector count, so
 * the reported cylinder count looks 512x too large — confirm the
 * intended unit before relying on this geometry.
 */
static int sbull_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
unsigned long size;
struct sbull_dev *pdev = bdev->bd_disk->private_data;
size = pdev->size;
geo->cylinders = (size & ~0x3f) >> 6;	/* size / (heads * sectors) */
geo->heads = 4;
geo->sectors = 16;
geo->start = 0;
return 0;
}
/*
 * The device operations structure.  Note that block devices have no
 * read/write methods here: data transfer is driven entirely by the
 * request / make_request functions attached to the queue.
 */
static struct block_device_operations sbull_ops = {
.owner = THIS_MODULE,
.open = sbull_open,
.release = sbull_release,
.media_changed = sbull_media_changed,
.revalidate_disk = sbull_revalidate,
.ioctl = sbull_ioctl,
.getgeo = sbull_getgeo,
};
/*
* Set up our internal device.
*/
static void setup_device(struct sbull_dev *dev, int which)
{
/*
* Get some memory.
*/
memset (dev, 0, sizeof (struct sbull_dev));
dev->size = nsectors*hardsect_size;
dev->data = vmalloc(dev->size);
if (dev->data == NULL) {
printk (KERN_NOTICE "vmalloc failure.
");
return;
}
spin_lock_init(&dev->lock);
/*
* The timer which "invalidates" the device.
*/
init_timer(&dev->timer);
dev->timer.data = (unsigned long) dev;
dev->timer.function = sbull_invalidate;
/*
* The I/O queue, depending on whether we are using our own
* make_request function or not.
*/
switch (request_mode) {
case RM_NOQUEUE:
dev->queue = blk_alloc_queue(GFP_KERNEL);
if (dev->queue == NULL)
goto out_vfree;
blk_queue_make_request(dev->queue, sbull_make_request);
break;
case RM_FULL:
dev->queue = blk_init_queue(sbull_full_request, &dev->lock);
if (dev->queue == NULL)
goto out_vfree;
break;
default:
printk(KERN_NOTICE "Bad request mode %d, using simple
", request_mode);
/* fall into.. */
case RM_SIMPLE:
dev->queue = blk_init_queue(sbull_request, &dev->lock);
if (dev->queue == NULL)
goto out_vfree;
break;
}
// blk_queue_hardsect_size(dev->queue, hardsect_size);
blk_queue_logical_block_size(dev->queue, hardsect_size);
dev->queue->queuedata = dev;
/*
* And the gendisk structure.
*/
dev->gd = alloc_disk(SBULL_MINORS);
if (! dev->gd) {
printk (KERN_NOTICE "alloc_disk failure
");
goto out_vfree;
}
dev->gd->major = sbull_major;
dev->gd->first_minor = which*SBULL_MINORS;
dev->gd->fops = &sbull_ops;
dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf (dev->gd->disk_name, 32, "sbull%c", which + 'a');
set_capacity(dev->gd, nsectors*(hardsect_size/KERNEL_SECTOR_SIZE));
add_disk(dev->gd);
return;
out_vfree:
if (dev->data)
vfree(dev->data);
}
/*
 * Module init: register the block major number (dynamically assigned
 * when sbull_major is 0), allocate the device array and set up each
 * disk.  Returns 0 on success or a negative errno.
 */
static int __init sbull_init(void)
{
	int i;

	/*
	 * Get registered.
	 */
	sbull_major = register_blkdev(sbull_major, "sbull");
	if (sbull_major <= 0) {
		/* Fixed: the format string had a literal line break where
		 * "\n" belonged. */
		printk(KERN_WARNING "sbull: unable to get major number\n");
		return -EBUSY;
	}
	/*
	 * Allocate the device array, and initialize each one.
	 */
	Devices = kmalloc(ndevices * sizeof(struct sbull_dev), GFP_KERNEL);
	if (Devices == NULL)
		goto out_unregister;
	for (i = 0; i < ndevices; i++)
		setup_device(Devices + i, i);
	return 0;

out_unregister:
	/* Fixed: was unregistering under the name "sbd", which does not
	 * match the "sbull" used at registration above. */
	unregister_blkdev(sbull_major, "sbull");
	return -ENOMEM;
}
/*
 * Module teardown: for each device, stop the media-change timer,
 * unregister and drop the gendisk, release the request queue, and
 * free the backing store; finally give back the major number and
 * free the device array.
 */
static void sbull_exit(void)
{
int i;
for (i = 0; i < ndevices; i++) {
struct sbull_dev *dev = Devices + i;
del_timer_sync(&dev->timer);	/* no more simulated media changes */
if (dev->gd) {
del_gendisk(dev->gd);
put_disk(dev->gd);
}
/* A make_request (RM_NOQUEUE) queue was never blk_init'ed, so it is
 * released by dropping its kobject rather than blk_cleanup_queue(). */
if (dev->queue) {
if (request_mode == RM_NOQUEUE)
// blk_put_queue(dev->queue);
kobject_put(&(dev->queue)->kobj);
else
blk_cleanup_queue(dev->queue);
}
if (dev->data)
vfree(dev->data);
}
unregister_blkdev(sbull_major, "sbull");
kfree(Devices);
}
module_init(sbull_init);
module_exit(sbull_exit);
이 내용에 흥미가 있습니까?
현재 기사가 여러분의 문제를 해결하지 못하는 경우 AI 엔진은 머신러닝 분석(스마트 모델이 방금 만들어져 부정확한 경우가 있을 수 있음)을 통해 가장 유사한 기사를 추천합니다:
Java 타이머 요약현재 시간을 기준으로 지정된 밀리초를 지연한 후 지정된 시간 간격으로 TimerTask 작업을 무한대로 수행합니다.(fixed-delay execution) 현재 시간을 기준으로 지정된 밀리초를 지연한 후 지정된 시...
텍스트를 자유롭게 공유하거나 복사할 수 있습니다.하지만 이 문서의 URL은 참조 URL로 남겨 두십시오.
CC BY-SA 2.5, CC BY-SA 3.0 및 CC BY-SA 4.0에 따라 라이센스가 부여됩니다.