1
0
mirror of https://github.com/libuv/libuv synced 2025-03-28 21:13:16 +00:00

compiles - obviously doesn't work

This commit is contained in:
Ryan Dahl 2011-03-28 03:54:18 -07:00
parent 668c0c6b27
commit b17dcd55d7
5 changed files with 63 additions and 25 deletions

View File

@ -2,7 +2,7 @@ test/echo-server: test/echo-server.c ol.a
$(CC) -o test/echo-server test/echo-server.c ol.a -lm
ol.a: ol-unix.o ev/ev.o
ar rcs ol.a ol-unix.o ev/ev.o
$(AR) rcs ol.a ol-unix.o ev/ev.o
ol-unix.o: ol-unix.c ol.h ol-unix.h
$(CC) -c ol-unix.c -o ol-unix.o -lm
@ -17,9 +17,9 @@ ev/config.h:
.PHONY: clean distclean
clean:
rm *.o *.a
$(RM) -f *.o *.a
$(MAKE) -C ev clean
distclean:
rm *.o *.a
$(RM) -f *.o *.a
$(MAKE) -C ev clean

View File

@ -52,11 +52,9 @@ high-concurrency servers. Instead a system called <a
href="http://msdn.microsoft.com/en-us/library/ms686358(v=vs.85).aspx">overlapped
I/O</a> is used. <a
href="http://msdn.microsoft.com/en-us/library/aa365198(VS.85).aspx">I/O
completion ports</a> (IOCP) are the objects used to poll overlapped I/O
completion ports</a> (IOCPs) are the objects used to poll overlapped I/O
for completion.
<p>
IOCP are similar to the Unix I/O multiplexers
IOCPs are similar to the Unix I/O multiplexers
<ul>
<li> <a
href="http://en.wikipedia.org/wiki/Kqueue">kqueue</a> on Macintosh and
@ -66,25 +64,25 @@ BSDs,
<li> <a
href="http://developers.sun.com/solaris/articles/event_completion.html">event
completion ports</a> on Solaris,
<li> <a href="">poll</a> on modern Unixes, or
<li> <a href="">poll</a> on modern Unixes, and
<li> <a
href="http://www.kernel.org/doc/man-pages/online/pages/man2/select.2.html"><code>select()</code></a>,
which is available everywhere but is inefficient.
which is available everywhere.
</ul>
The fundamental variation is that in Unixes you generally ask the kernel to
The fundamental variation is that in a Unix you generally ask the kernel to
wait for state change in a file descriptor's readability or writability. With
overlapped I/O and IOCP the programmers waits for asynchronous function
overlapped I/O and IOCPs the programmer waits for asynchronous function
calls to complete.
For example, instead of waiting for a socket to become writable and then
using <a
href="http://www.kernel.org/doc/man-pages/online/pages/man2/send.2.html"><code>send(2)</code></a>
on it, as you commonly do in Unix operating systems, with overlapped I/O you
on it, as you commonly would do in a Unix, with overlapped I/O you
would rather <a
href="http://msdn.microsoft.com/en-us/library/ms742203(v=vs.85).aspx"><code>WSASend()</code></a>
the data and then wait for it to have been sent.
<p> Unix non-blocking I/O is not beautiful. A major feature of Unix is the
unified treatment of all things as files (or more precisely as file
<p> Unix non-blocking I/O is not beautiful. A principal abstraction in Unix
is the unified treatment of many things as files (or more precisely as file
descriptors). <code>write(2)</code>, <code>read(2)</code>, and
<code>close(2)</code> work with TCP sockets just as they do on regular
files. Well&mdash;kind of. Synchronous operations work similarly on different
@ -121,7 +119,7 @@ file descriptors with a I/O multiplexer&mdash;not POSIX AIO.
Common practice for accessing the disk asynchronously is still done using custom
userland thread pools&mdash;not POSIX AIO.
<p>Windows IOCP does support both sockets and regular file I/O which
<p>Windows IOCPs do support both sockets and regular file I/O which
greatly simplifies the handling of disks. Although the function names are
not exactly the same in Windows for sockets and regular files,
they act similarly.
@ -279,7 +277,7 @@ layer but in the author's opinion, none are completely satisfactory.
<code>read(2)</code> calls do not guarantee that they won't block.
Therefore libeio is provided for calling various disk-related
syscalls in a managed thread pool. Unfortunately the abstraction layer
which libev targets is not appropriate for IOCP&mdash;libev works strictly
which libev targets is not appropriate for IOCPs&mdash;libev works strictly
with file descriptors and does not have the concept of a <i>socket</i>.
Furthermore users on Unix will be using libeio for file I/O which is not
ideal for porting to Windows. On Windows libev currently uses
@ -295,14 +293,14 @@ layer but in the author's opinion, none are completely satisfactory.
libevent and rejected it</a>&mdash;it's interesting to read his reasons
why. <a
href="http://google-opensource.blogspot.com/2010/01/libevent-20x-like-libevent-14x-only.html">A
major rewrite</a> was done for version 2 to support Windows IOCP but <a
major rewrite</a> was done for version 2 to support Windows IOCPs but <a
href="http://www.mail-archive.com/libevent-users@monkey.org/msg01730.html">anecdotal
evidence</a> suggests that it is still not working correctly.
<p><b><a
href="http://www.boost.org/doc/libs/1_43_0/doc/html/boost_asio.html">Boost
ASIO</a>.</b> It basically does what you want on Windows and Unix for
sockets. That is, epoll on Linux, kqueue on Macintosh, IOCP on Windows.
sockets. That is, epoll on Linux, kqueue on Macintosh, IOCPs on Windows.
It does not support file I/O. In the author's opinion it is too large
for a not extremely difficult problem (~300 files, ~12000 semicolons).

View File

@ -3,17 +3,16 @@
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <string.h> /* strnlen */
void ol_tcp_io(EV_P_ ev_io* watcher, int revents);
void ol_tcp_connect(ol_handle* handle, ol_req* req);
int ol_close_error(ol_handle* handle, ol_err err);
/* Normalize a raw syscall/library result into an ol_err value.
 * Currently the identity mapping; kept as a single hook so future
 * error translation (e.g. errno capture) has one place to live.
 * Returns e unchanged. */
static int ol_err_new(int e) {
  /* The original branched on e == 0 but returned e on both paths;
   * the empty else branch was dead code, so collapse it. */
  return e;
}
@ -28,6 +27,16 @@ struct sockaddr_in ol_ip4_addr(char *ip, int port) {
}
/* Request an orderly close of the handle.  Thin wrapper over
 * ol_close_error() with a "no error" code of 0, so close_cb (if set)
 * is invoked with err == 0.  Returns the value propagated by
 * ol_close_error() (0 here). */
int ol_close(ol_handle* handle) {
return ol_close_error(handle, 0);
}
/* Run the libev event loop until it has no more active watchers.
 * Returns 0 on normal completion.
 * Bug fix: the function is declared int but previously fell off the
 * end without a return statement -- using that value was undefined
 * behavior. */
int ol_run() {
  ev_run(0);
  return 0;
}
ol_handle* ol_handle_new(ol_close_cb close_cb, void* data) {
ol_handle *handle = calloc(sizeof(ol_handle), 1);
handle->close_cb = close_cb;
@ -65,15 +74,16 @@ int ol_listen(ol_handle* handle, int backlog, ol_accept_cb cb) {
}
void ol_close_error(ol_handle* handle, ol_err err) {
/* Tear down a handle's I/O state and deliver the close notification.
 * Order matters here: stop the libev read watcher first so no event
 * can fire on a dead fd, then close the descriptor, then mark the
 * handle closed (fd = -1) before user code runs in close_cb.
 * close_cb is optional; it receives the handle and err as-is.
 * Returns err unchanged so callers can propagate it. */
int ol_close_error(ol_handle* handle, ol_err err) {
ev_io_stop(&handle->_.read_watcher);
close(handle->_.fd);
handle->_.fd = -1;
if (handle->close_cb) {
handle->close_cb(handle, err);
}
return err;
}
@ -197,3 +207,31 @@ int ol_connect(ol_handle* handle, ol_req *req_in, struct sockaddr* addr) {
return ol_err_new(r);
}
/* Vectored write of bufcnt buffers -- NOT YET IMPLEMENTED.
 * Aborts via assert(0) in debug builds.
 * NOTE(review): if compiled with NDEBUG the assert disappears and
 * this silently returns 0 as if the write succeeded -- callers such
 * as ol_write2() would get a false success. */
int ol_write(ol_handle* handle, ol_req *req, ol_buf* bufs, int bufcnt) {
// stub
assert(0);
return 0;
}
/* Convenience wrapper: write a NUL-terminated string to the handle
 * as a single-buffer, request-less ol_write().  At most 1 MB of the
 * string is examined (strnlen bound), so an unterminated buffer
 * cannot cause an unbounded scan. */
int ol_write2(ol_handle* handle, const char* msg) {
  ol_buf buf;
  buf.base = (char*) msg;
  buf.len = strnlen(msg, 1024 * 1024);
  return ol_write(handle, NULL, &buf, 1);
}
/* Vectored read into bufcnt buffers -- NOT YET IMPLEMENTED.
 * Aborts via assert(0) in debug builds.
 * NOTE(review): with NDEBUG the assert is compiled out and this
 * silently returns 0; callers cannot distinguish that from success. */
int ol_read(ol_handle* handle, ol_req *req, ol_buf* bufs, int bufcnt) {
// stub
assert(0);
return 0;
}
/* Release the memory backing a handle.  Intended to be called only
 * after the close callback has fired.
 * NOTE(review): the original left a "lists?" question -- presumably
 * asking whether the handle must be unlinked from some internal list
 * before freeing; confirm against the callers. */
void ol_free(ol_handle* handle) {
  free(handle);
}

3
ol.h
View File

@ -85,6 +85,7 @@ int ol_listen(ol_handle* handle, int backlog, ol_accept_cb cb);
int ol_connect(ol_handle* handle, ol_req *req, struct sockaddr* addr);
int ol_read(ol_handle* handle, ol_req *req, ol_buf* bufs, int bufcnt);
int ol_write(ol_handle* handle, ol_req *req, ol_buf* bufs, int bufcnt);
int ol_write2(ol_handle* handle, const char* msg);
int ol_shutdown(ol_handle* handle, ol_req *req);
// Request handle to be closed. close_cb will be made
@ -93,7 +94,7 @@ int ol_close(ol_handle* handle);
// Must be called for all handles after close_cb. Handles that arrive
// via the accept_cb must use ol_free().
int ol_free(ol_handle* handle);
void ol_free(ol_handle* handle);

View File

@ -1,6 +1,7 @@
#include "../ol.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BUFSIZE 1024