Binder Driver (Part 1)

xiaoxiao  2021-03-25

Preface: In this part we walk through the core functions of the binder kernel driver and what, roughly, each of them does.

Mapping between user-space calls and their driver-side counterparts:

- open()  -> binder_open()
- mmap()  -> binder_mmap()
- ioctl() -> binder_ioctl()

1 binder_init()

binder_init() is invoked through device_initcall(binder_init); that device_initcall registration is the entry point of the binder driver. It runs when the system boots and then calls binder_init().

    static int __init binder_init(void)
    {
        int ret;

        // create a workqueue named "binder"
        binder_deferred_workqueue = create_singlethread_workqueue("binder");
        if (!binder_deferred_workqueue)
            return -ENOMEM;
        ...
        ret = misc_register(&binder_miscdev); // [1.1]
        ...
        return ret;
    }
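For reference, the binder_miscdev handed to misc_register() is defined in the same binder.c, together with the binder_fops it carries. The following is abridged from the kernel sources; the exact field set varies slightly across kernel versions:

    static const struct file_operations binder_fops = {
        .owner          = THIS_MODULE,
        .poll           = binder_poll,
        .unlocked_ioctl = binder_ioctl,
        .mmap           = binder_mmap,
        .open           = binder_open,
        .flush          = binder_flush,
        .release        = binder_release,
    };

    static struct miscdevice binder_miscdev = {
        .minor = MISC_DYNAMIC_MINOR, // let the misc core pick a free minor
        .name  = "binder",           // appears as /dev/binder
        .fops  = &binder_fops,
    };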

1.1 misc_register()

Looking at the file this function lives in, drivers/char/misc.c, we find that it is itself a driver, so it too needs its own entry point.

    int misc_register(struct miscdevice *misc)
    {
        struct miscdevice *c;
        dev_t dev;
        int err = 0;

        INIT_LIST_HEAD(&misc->list);

        mutex_lock(&misc_mtx);
        // reject the registration if the requested minor is already taken
        list_for_each_entry(c, &misc_list, list) {
            if (c->minor == misc->minor) {
                mutex_unlock(&misc_mtx);
                return -EBUSY;
            }
        }

        // MISC_DYNAMIC_MINOR: grab the next free minor from the bitmap
        if (misc->minor == MISC_DYNAMIC_MINOR) {
            int i = find_first_zero_bit(misc_minors, DYNAMIC_MINORS);
            if (i >= DYNAMIC_MINORS) {
                mutex_unlock(&misc_mtx);
                return -EBUSY;
            }
            misc->minor = DYNAMIC_MINORS - i - 1;
            set_bit(i, misc_minors);
        }

        dev = MKDEV(MISC_MAJOR, misc->minor);

        // create the device node, e.g. /dev/binder
        misc->this_device = device_create(misc_class, misc->parent, dev,
                                          misc, "%s", misc->name);
        if (IS_ERR(misc->this_device)) {
            int i = DYNAMIC_MINORS - misc->minor - 1;
            if (i < DYNAMIC_MINORS && i >= 0)
                clear_bit(i, misc_minors);
            err = PTR_ERR(misc->this_device);
            goto out;
        }

        // add the device to the global misc_list so misc_open() can find it
        list_add(&misc->list, &misc_list);
    out:
        mutex_unlock(&misc_mtx);
        return err;
    }
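As a usage illustration, registering any misc device follows the same pattern binder uses. This is a hypothetical demo module, not from the binder sources:

    #include <linux/module.h>
    #include <linux/fs.h>
    #include <linux/miscdevice.h>

    static int demo_open(struct inode *inode, struct file *filp)
    {
        return 0; // nothing to set up in this sketch
    }

    static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = demo_open,
    };

    static struct miscdevice demo_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name  = "demo",      // creates /dev/demo
        .fops  = &demo_fops,
    };

    static int __init demo_init(void)
    {
        return misc_register(&demo_miscdev); // same call binder_init() makes
    }

    static void __exit demo_exit(void)
    {
        misc_deregister(&demo_miscdev);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");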

1.1.1 misc_init()

    static int __init misc_init(void)
    {
        int err;

    #ifdef CONFIG_PROC_FS
        proc_create("misc", 0, NULL, &misc_proc_fops);
    #endif
        misc_class = class_create(THIS_MODULE, "misc");
        err = PTR_ERR(misc_class);
        if (IS_ERR(misc_class))
            goto fail_remove;

        err = -EIO;
        // register a file_operations structure for the misc major number
        if (register_chrdev(MISC_MAJOR, "misc", &misc_fops))
            goto fail_printk;
        misc_class->devnode = misc_devnode;
        return 0;
        ...
    }

    static const struct file_operations misc_fops = {
        .owner  = THIS_MODULE,
        .open   = misc_open,
        .llseek = noop_llseek,
    };

To summarize the above:

1. The misc device driver:
   - a file_operations structure with open = misc_open
   - registered via register_chrdev()
   - with misc_init() as its entry point
2. The binder driver:
   - a struct miscdevice named binder_miscdev, whose fops = &binder_fops contains the open, ioctl, and mmap operations
   - registered via misc_register()
   - with binder_init() as its entry point

The heart of it is still the file_operations structure. When user space now calls open() on the device, the next step is misc_open(): it finds the matching miscdevice in the linked list, takes its file_operations, and calls the open function defined in that structure, which for /dev/binder is binder_open().
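An abridged look at misc_open() in drivers/char/misc.c shows this dispatch; the details (e.g. replace_fops) differ across kernel versions:

    static int misc_open(struct inode *inode, struct file *file)
    {
        int minor = iminor(inode);
        struct miscdevice *c;
        const struct file_operations *new_fops = NULL;
        int err = -ENODEV;

        // find the registered miscdevice with this minor number
        list_for_each_entry(c, &misc_list, list) {
            if (c->minor == minor) {
                new_fops = fops_get(c->fops);
                break;
            }
        }
        ...
        // swap in the device's own fops, then call its open() --
        // for /dev/binder this is where binder_open() is entered
        replace_fops(file, new_fops);
        if (file->f_op->open)
            err = file->f_op->open(inode, file);
        ...
        return err;
    }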

2 binder_open()

Opens the device node and creates the binder_proc structure that represents the calling process.

    static int binder_open(struct inode *nodp, struct file *filp)
    {
        struct binder_proc *proc; // [2.1] the binder process

        proc = kzalloc(sizeof(*proc), GFP_KERNEL); // allocate the process structure
        get_task_struct(current);
        proc->tsk = current;
        INIT_LIST_HEAD(&proc->todo);                  // initialize the todo list
        init_waitqueue_head(&proc->wait);             // initialize the wait queue
        proc->default_priority = task_nice(current);  // record the priority

        binder_lock(__func__);
        binder_stats_created(BINDER_STAT_PROC);
        hlist_add_head(&proc->proc_node, &binder_procs);
        proc->pid = current->group_leader->pid;
        INIT_LIST_HEAD(&proc->delivered_death);
        filp->private_data = proc; // point the file's private_data at the binder_proc
        binder_unlock(__func__);

        return 0;
    }

2.1 struct binder_proc

Every open of the device creates one binder_proc structure to represent the calling process.

    struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;         // all services belonging to this process
        struct rb_root refs_by_desc;  // binder_ref entries referencing other services, keyed by handle
        struct rb_root refs_by_node;  // the same binder_ref entries, keyed by node
        int pid;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        struct task_struct *tsk;
        struct files_struct *files;
        struct hlist_node deferred_work_node;
        int deferred_work;
        void *buffer;
        ptrdiff_t user_buffer_offset;
        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
        size_t free_async_space;
        struct page **pages;
        size_t buffer_size;
        uint32_t buffer_free;
        struct list_head todo;
        wait_queue_head_t wait;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int ready_threads;
        long default_priority;
        struct dentry *debugfs_entry;
    };

3 binder_mmap()

Its job is to allocate the buffer memory and hand it to proc for management:

- proc->buffer = area->addr (the kernel start address of the buffer)
- proc->user_buffer_offset (the fixed offset between the user-space mapping and that kernel address)

    static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        int ret;
        struct vm_struct *area;                        // kernel virtual address area
        struct binder_proc *proc = filp->private_data; // the process saved in binder_open()
        const char *failure_string;
        struct binder_buffer *buffer;

        if ((vma->vm_end - vma->vm_start) > SZ_4M)
            vma->vm_end = vma->vm_start + SZ_4M;       // the mapping must not exceed 4 MB
        ...
        mutex_lock(&binder_mmap_lock);
        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); // reserve kernel address space
        proc->buffer = area->addr;                     // kernel start address of the buffer
        proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
        mutex_unlock(&binder_mmap_lock);
        ...
        proc->pages = kzalloc(sizeof(proc->pages[0]) *
                              ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                              GFP_KERNEL);
        ...
        return ret;
    }
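Because the same physical pages end up visible both at proc->buffer (kernel) and at vma->vm_start (user space), converting an address between the two views is constant pointer arithmetic. A minimal sketch of that idea, using hypothetical helper names that do not appear in binder.c:

    // Hypothetical helpers: the two mappings always differ by the
    // constant proc->user_buffer_offset computed in binder_mmap().
    static void __user *binder_kaddr_to_uaddr(struct binder_proc *proc, void *kaddr)
    {
        return (void __user *)((uintptr_t)kaddr + proc->user_buffer_offset);
    }

    static void *binder_uaddr_to_kaddr(struct binder_proc *proc, void __user *uaddr)
    {
        return (void *)((uintptr_t)uaddr - proc->user_buffer_offset);
    }

This double mapping is why binder needs only one copy per transaction: the driver copies data into the kernel mapping, and the receiving process reads it directly through its user mapping.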

4 binder_ioctl()

binder_ioctl() is what actually carries out the cross-process IPC operations.

ioctl(file descriptor, ioctl command, data)

- file descriptor: the handle from opening the device via binder_open()
- ioctl command: the most important one is BINDER_WRITE_READ, which is the main branch of the switch in binder_ioctl(). The commands are:

    #define BINDER_WRITE_READ      _IOWR('b', 1, struct binder_write_read)
    #define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
    #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
    #define BINDER_THREAD_EXIT     _IOW('b', 8, int)
    #define BINDER_VERSION         _IOWR('b', 9, struct binder_version)
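The simplest of these to exercise from user space is BINDER_VERSION. A minimal sketch; note the uapi header path is an assumption here and differs on older kernels (<linux/binder.h>):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h> // assumption: path varies by kernel version

    int main(void)
    {
        struct binder_version vers;
        int fd = open("/dev/binder", O_RDWR);
        if (fd < 0)
            return 1;
        // BINDER_VERSION fills in the driver's protocol version
        if (ioctl(fd, BINDER_VERSION, &vers) == 0)
            printf("binder protocol version: %d\n", vers.protocol_version);
        close(fd);
        return 0;
    }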

The overall flow: write_size > 0 means there are commands to consume, read_size > 0 means the caller wants data back, and both can happen in a single call:

    copy_from_user(&bwr, ubuf, sizeof(bwr)); // copy the header from user space into bwr
    if (bwr.write_size > 0) {
        // write: consume commands from the user-space write buffer
        binder_thread_write(proc, thread,
                            (void __user *)bwr.write_buffer,
                            bwr.write_size, &bwr.write_consumed);
    }
    if (bwr.read_size > 0) {
        // read: fill the user-space read buffer with work for this thread
        binder_thread_read(proc, thread,
                           (void __user *)bwr.read_buffer,
                           bwr.read_size, &bwr.read_consumed,
                           filp->f_flags & O_NONBLOCK);
    }

binder_ioctl() retrieves the binder_proc created in binder_open() from filp->private_data, and finds or creates the corresponding thread:

    static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
    {
        int ret;
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread; // the binder thread
        unsigned int size = _IOC_SIZE(cmd);
        void __user *ubuf = (void __user *)arg;

        // sleep until any user-error stop condition has been cleared
        ret = wait_event_interruptible(binder_user_error_wait,
                                       binder_stop_on_user_error < 2);
        binder_lock(__func__); // take the global binder lock
        thread = binder_get_thread(proc); // [4.1] find or create this process's thread
        switch (cmd) {
        case BINDER_WRITE_READ: {
            struct binder_write_read bwr;

            if (size != sizeof(struct binder_write_read)) {
                ret = -EINVAL;
                goto err;
            }
            // copy the binder_write_read header from user space into bwr
            if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
                ret = -EFAULT;
                goto err;
            }
            if (bwr.write_size > 0) { // there are commands to write
                ret = binder_thread_write(proc, thread,
                                          (void __user *)bwr.write_buffer,
                                          bwr.write_size, &bwr.write_consumed);
                ...
            }
            if (bwr.read_size > 0) { // the caller wants to read
                ret = binder_thread_read(proc, thread,
                                         (void __user *)bwr.read_buffer,
                                         bwr.read_size, &bwr.read_consumed,
                                         filp->f_flags & O_NONBLOCK);
                if (!list_empty(&proc->todo))
                    wake_up_interruptible(&proc->wait);
                ...
            }
            // copy the updated header (consumed counts) back to user space
            if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
                ret = -EFAULT;
                goto err;
            }
            break;
        }
        ...
        return ret;
    }
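Seen from the other side of the system call, a user-space caller drives this branch by filling in a struct binder_write_read and issuing the ioctl. A hedged, illustrative-only sketch: no real BC_* command stream is built, so only the read branch runs, and binder_wait_for_work is a hypothetical name:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h> // assumption: path varies by kernel version

    // Illustrative only: wait for incoming binder work on an already
    // opened /dev/binder descriptor. write_size stays 0, so the driver
    // skips binder_thread_write() and goes straight to binder_thread_read().
    static int binder_wait_for_work(int fd)
    {
        uint32_t readbuf[128]; // receives BR_* return commands
        struct binder_write_read bwr = {0};

        bwr.read_buffer = (uintptr_t)readbuf;
        bwr.read_size   = sizeof(readbuf);

        // blocks (unless O_NONBLOCK) until work arrives; on return,
        // bwr.read_consumed says how much of readbuf was filled
        return ioctl(fd, BINDER_WRITE_READ, &bwr);
    }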

4.1 binder_get_thread()

binder_get_thread() walks the process's thread table via the proc pointer: if the current thread is already there it is returned; otherwise a binder_thread is created for the current thread, inserted into the process's tree, and returned.

    static struct binder_thread *binder_get_thread(struct binder_proc *proc)
    {
        struct binder_thread *thread = NULL;
        struct rb_node *parent = NULL;
        struct rb_node **p = &proc->threads.rb_node; // root of the red-black tree of threads

        while (*p) { // look up the current thread in the tree, keyed by pid
            parent = *p;
            thread = rb_entry(parent, struct binder_thread, rb_node);
            if (current->pid < thread->pid)
                p = &(*p)->rb_left;
            else if (current->pid > thread->pid)
                p = &(*p)->rb_right;
            else
                break;
        }
        if (*p == NULL) { // not found: create and insert a new binder_thread
            thread = kzalloc(sizeof(*thread), GFP_KERNEL);
            binder_stats_created(BINDER_STAT_THREAD);
            thread->proc = proc;
            thread->pid = current->pid; // remember the current thread's pid
            init_waitqueue_head(&thread->wait);
            INIT_LIST_HEAD(&thread->todo);
            rb_link_node(&thread->rb_node, parent, p);
            rb_insert_color(&thread->rb_node, &proc->threads);
            thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
            thread->return_error = BR_OK;
            thread->return_error2 = BR_OK;
        }
        return thread;
    }
