
chardev: use per-dev context for io_add_watch_poll

It was only passed in by chr_update_read_handlers().  However, when we
reconnect, that context information is lost.  So if a chardev was
running on a non-default context (rather than the default context, i.e.
the NULL pointer), it would switch back to the default context when the
reconnection happens, while it should really stick to the old context.

Convert all the callers of io_add_watch_poll() to use the internally
cached gcontext, so that the context survives reconnections.
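
A minimal GLib sketch (not QEMU code; FakeChardev, rearm_watch() and
on_ready() are hypothetical names used only for illustration) of why the
cached context matters: attaching a GSource with a NULL context binds it
to the default GMainContext, whereas attaching it with the saved context
keeps the watch on the loop the chardev was originally configured for,
even when the watch is torn down and recreated on reconnect.

    /* Illustrative sketch only, not QEMU code. */
    #include <glib.h>

    typedef struct {
        GMainContext *gcontext;   /* cached when read handlers are set up */
        GSource *gsource;
    } FakeChardev;

    static gboolean on_ready(gpointer opaque)
    {
        /* the read callback would run here, in the loop owning gcontext */
        return G_SOURCE_CONTINUE;
    }

    /* Called on initial setup and again on every reconnect. */
    static void rearm_watch(FakeChardev *chr)
    {
        GSource *src = g_timeout_source_new(100); /* stand-in for an I/O watch */

        g_source_set_callback(src, on_ready, chr, NULL);
        /*
         * Passing NULL here would silently move the watch back to the
         * default context when it is recreated; chr->gcontext keeps it
         * on the context the chardev was attached to in the first place.
         */
        g_source_attach(src, chr->gcontext);
        chr->gsource = src;
    }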

Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <1505975754-21555-4-git-send-email-peterx@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Peter Xu authored 8 years ago
commit 6bbb6c0644

4 files changed, 5 insertions(+), 5 deletions(-)
chardev/char-fd.c      +1 -1
chardev/char-pty.c     +1 -1
chardev/char-socket.c  +2 -2
chardev/char-udp.c     +1 -1

chardev/char-fd.c  (+1 -1)

@@ -94,7 +94,7 @@ static void fd_chr_update_read_handler(Chardev *chr,
         chr->gsource = io_add_watch_poll(chr, s->ioc_in,
                                            fd_chr_read_poll,
                                            fd_chr_read, chr,
-                                           context);
+                                           chr->gcontext);
     }
 }
 

chardev/char-pty.c  (+1 -1)

@@ -219,7 +219,7 @@ static void pty_chr_state(Chardev *chr, int connected)
             chr->gsource = io_add_watch_poll(chr, s->ioc,
                                                pty_chr_read_poll,
                                                pty_chr_read,
-                                               chr, NULL);
+                                               chr, chr->gcontext);
         }
     }
 }

chardev/char-socket.c  (+2 -2)

@@ -516,7 +516,7 @@ static void tcp_chr_connect(void *opaque)
         chr->gsource = io_add_watch_poll(chr, s->ioc,
                                            tcp_chr_read_poll,
                                            tcp_chr_read,
-                                           chr, NULL);
+                                           chr, chr->gcontext);
     }
     qemu_chr_be_event(chr, CHR_EVENT_OPENED);
 }
@@ -535,7 +535,7 @@ static void tcp_chr_update_read_handler(Chardev *chr,
         chr->gsource = io_add_watch_poll(chr, s->ioc,
                                            tcp_chr_read_poll,
                                            tcp_chr_read, chr,
-                                           context);
+                                           chr->gcontext);
     }
 }
 

chardev/char-udp.c  (+1 -1)

@@ -110,7 +110,7 @@ static void udp_chr_update_read_handler(Chardev *chr,
         chr->gsource = io_add_watch_poll(chr, s->ioc,
                                            udp_chr_read_poll,
                                            udp_chr_read, chr,
-                                           context);
+                                           chr->gcontext);
     }
 }
 