/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
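
/*
 * This test connects to TEST_PORT (an echo server is assumed to be
 * listening there) and uses the `nested` counter to verify that every
 * callback (connect, write, timeout, read, shutdown, close) is invoked
 * with nested == 0, i.e. from a fresh stack rather than from inside the
 * oio_* call that triggered it.
 */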

/*
 * TODO: Add explanation of why we want on_close to be called from fresh
 * stack.
 */

#include "../oio.h"
#include "task.h"


const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";

oio_handle client;
oio_req connect_req, write_req, timeout_req, shutdown_req;
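
/* `nested` is incremented around the oio_* calls that start an operation
 * (connect, write, timeout, read_start, shutdown, close); each callback
 * asserts that it is zero, i.e. that the backend did not invoke it directly
 * from inside the call that started the operation. */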
int nested = 0;
int close_cb_called = 0;
int connect_cb_called = 0;
int write_cb_called = 0;
int timeout_cb_called = 0;
int bytes_received = 0;
int shutdown_cb_called = 0;


void close_cb(oio_handle* handle, int status) {
  ASSERT(status == 0);
  ASSERT(nested == 0 && "close_cb must be called from a fresh stack");

  close_cb_called++;
}


void shutdown_cb(oio_req* req, int status) {
  ASSERT(status == 0);
  ASSERT(nested == 0 && "shutdown_cb must be called from a fresh stack");

  shutdown_cb_called++;
}


void read_cb(oio_handle* handle, int nread, oio_buf buf) {
  ASSERT(nested == 0 && "read_cb must be called from a fresh stack");

  if (nread == -1) {
    ASSERT(oio_last_error().code == OIO_EOF);

    nested++;
    if (oio_close(handle)) {
      FATAL("oio_close failed");
    }
    nested--;

    return;
  }

  bytes_received += nread;
  free(buf.base);

  /* We call shutdown here because when bytes_received == sizeof MESSAGE */
  /* there will be no more data sent or received, so here it would be */
  /* possible for a backend to call shutdown_cb immediately and *not* */
  /* from a fresh stack. */
  if (bytes_received == sizeof MESSAGE) {
    nested++;
    oio_req_init(&shutdown_req, handle, shutdown_cb);
    if (oio_shutdown(&shutdown_req)) {
      FATAL("oio_shutdown failed");
    }
    nested--;
  }
}


void timeout_cb(oio_req* req, int64_t skew, int status) {
  ASSERT(status == 0);
  ASSERT(nested == 0 && "timeout_cb must be called from a fresh stack");

  nested++;
  if (oio_read_start(&client, read_cb)) {
    FATAL("oio_read_start failed");
  }
  nested--;

  timeout_cb_called++;
}


void write_cb(oio_req* req, int status) {
  ASSERT(status == 0);
  ASSERT(nested == 0 && "write_cb must be called from a fresh stack");

  /* After the data has been sent, we're going to wait for a while, then */
  /* start reading. This makes us certain that the message has been echoed */
  /* back to our receive buffer when we start reading. This maximizes the */
  /* temptation for the backend to use a dirty stack for calling read_cb. */
  nested++;
  oio_req_init(&timeout_req, NULL, timeout_cb);
  if (oio_timeout(&timeout_req, 500)) {
    FATAL("oio_timeout failed");
  }
  nested--;

  write_cb_called++;
}


void connect_cb(oio_req* req, int status) {
  oio_buf buf;

  ASSERT(status == 0);
  ASSERT(nested == 0 && "connect_cb must be called from a fresh stack");

  nested++;

  buf.base = (char*) &MESSAGE;
  buf.len = sizeof MESSAGE;

  oio_req_init(&write_req, req->handle, write_cb);

  if (oio_write(&write_req, &buf, 1)) {
    FATAL("oio_write failed");
  }

  nested--;

  connect_cb_called++;
}
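

/* Simple malloc-based allocator passed to oio_init(); read_cb frees
 * buf.base after the data has been consumed. */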
static oio_buf alloc_cb(oio_handle* handle, size_t size) {
  oio_buf buf;
  buf.len = size;
  buf.base = (char*) malloc(size);
  ASSERT(buf.base);
  return buf;
}
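

/* Connect, write MESSAGE, wait 500 ms so the echo has arrived, then start
 * reading; read_cb triggers shutdown once all bytes have been received and
 * closes the handle on EOF. */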
TEST_IMPL(callback_stack) {
  struct sockaddr_in addr = oio_ip4_addr("127.0.0.1", TEST_PORT);

  oio_init(alloc_cb);

  if (oio_tcp_init(&client, &close_cb, NULL)) {
    FATAL("oio_tcp_init failed");
  }

  nested++;
  oio_req_init(&connect_req, &client, connect_cb);
  if (oio_connect(&connect_req, (struct sockaddr*) &addr)) {
    FATAL("oio_connect failed");
  }
  nested--;

  oio_run();

  ASSERT(nested == 0);
  ASSERT(connect_cb_called == 1 && "connect_cb must be called exactly once");
  ASSERT(write_cb_called == 1 && "write_cb must be called exactly once");
  ASSERT(timeout_cb_called == 1 && "timeout_cb must be called exactly once");
  ASSERT(bytes_received == sizeof MESSAGE);
  ASSERT(shutdown_cb_called == 1 && "shutdown_cb must be called exactly once");
  ASSERT(close_cb_called == 1 && "close_cb must be called exactly once");

  return 0;
}