实现了一个比Nginx速度更快的HTTP服务器(3)

/* Create the epoll instance; efd is its file descriptor. */
efd = epoll_create1 ( 0 );
if ( efd == -1 )
{
...
}

/* Register the listening socket: wake on readability (pending
 * connections), edge-triggered. */
event.data.fd = listen_sock;
event.events = EPOLLIN | EPOLLET;
s = epoll_ctl ( efd, EPOLL_CTL_ADD, listen_sock, &event );
if ( s == -1 )
{
...
}

/* Buffer where events are returned by epoll_wait(). */
events = calloc ( MAXEVENTS, sizeof event );

这里的EPOLLIN表示监听“可读”事件。

在主循环中epoll_wait():

/* Main event loop: block in epoll_wait() and dispatch every ready fd. */
while ( 1 )
{
    int n, i;

    /* -1 timeout: block until at least one registered fd is ready. */
    n = epoll_wait ( efd, events, MAXEVENTS, -1 );
    if ( n == -1 )
    {
        /* A delivered signal interrupts epoll_wait; just retry. */
        if ( errno == EINTR )
            continue;
        /* Any other failure (e.g. EBADF) is persistent — the original
         * code would spin forever re-printing the same error here. */
        perror ( "epoll_wait" );
        break;
    }
    for ( i = 0; i < n; i++ )
    {
        /* Error or hang-up on this connection: drop it. */
        if ( ( events[i].events & EPOLLERR ) ||
                ( events[i].events & EPOLLHUP ) )
        {
            fprintf ( stderr, "epoll error\n" );
            close ( events[i].data.fd );
            continue;
        }

        /* Readable fd: dispatch to the state machine. */
        handle_request ( events[i].data.fd );

    }
}

epoll_wait()会在发生事件后停止阻塞,继续执行,并把发生了事件的event的file descriptor放入events中,返回数组大小。注意的是,这里要循环处理所有的fd。


接下来是关键部分:

/* Dispatch one ready file descriptor.  The listening socket gets
 * accept()ed; any other socket is mapped back to its struct process
 * and resumes from whatever state it was suspended in. */
void handle_request ( int sock )
{
    if ( sock == listen_sock )
    {
        accept_sock ( sock );
        return;
    }

    struct process* p = find_process_by_sock ( sock );
    if ( p == 0 )
        return;   /* no tracked connection for this fd */

    if ( p->status == STATUS_READ_REQUEST_HEADER )
        read_request ( p );
    else if ( p->status == STATUS_SEND_RESPONSE_HEADER )
        send_response_header ( p );
    else if ( p->status == STATUS_SEND_RESPONSE )
        send_response ( p );
    /* any other status: nothing to do */
}

根据epoll返回的fd,做不同处理:如果是监听的socket,则accept();否则,根据sock的fd查找相应的process结构体,从中取回状态信息,返回到之前的处理状态中。这样,连接在事件再次就绪时就能从上次中断的状态继续执行,实现了非阻塞下的状态恢复机制。

在accept中,将accept出来的连接也设置为非阻塞,然后在process数组中找一个还没使用的空位,初始化,然后把这个socket存到process结构体中:

/* Accept every pending connection on listen_sock.  Under EPOLLET this
 * MUST loop until accept() reports EAGAIN, or queued connections are
 * silently lost.  Each accepted socket is made non-blocking,
 * registered with epoll for reads, and bound to a free struct process
 * slot in STATUS_READ_REQUEST_HEADER state.
 *
 * Returns the process slot of the last connection bound, or 0 if none
 * was bound (the current caller ignores the return value, but the
 * original version had UB here: a bare `return;` plus falling off the
 * end of a function declared to return struct process*). */
struct process* accept_sock ( int listen_sock )
{
    struct process* last = 0;
    int s;

    while ( 1 )
    {
        struct sockaddr in_addr;
        /* BUG FIX: in_len must be initialized before EVERY accept();
         * the original left it uninitialized on the "table full" path. */
        socklen_t in_len = sizeof in_addr;
        int infd;
        char hbuf[NI_MAXHOST], sbuf[NI_MAXSERV];

        infd = accept ( listen_sock, &in_addr, &in_len );
        if ( infd == -1 )
        {
            /* EAGAIN/EWOULDBLOCK: accept queue drained — normal exit. */
            if ( ( errno != EAGAIN ) && ( errno != EWOULDBLOCK ) )
                perror ( "accept" );
            break;
        }

        /* Process table full: hang up immediately and keep draining,
         * instead of returning and leaving the ET queue unserviced.
         * (MAX_PORCESS: macro name as defined elsewhere in this file.) */
        if ( current_total_processes >= MAX_PORCESS )
        {
            close ( infd );
            continue;
        }

        /* Resolve peer host/port numerically; result is informational
         * only, so the return value is deliberately ignored. */
        getnameinfo ( &in_addr, in_len,
                      hbuf, sizeof hbuf,
                      sbuf, sizeof sbuf,
                      NI_NUMERICHOST | NI_NUMERICSERV );

        /* Edge-triggered epoll requires non-blocking sockets. */
        s = setNonblocking ( infd );
        if ( s == -1 )
            abort ();

        /* TCP_CORK: coalesce response header + body into full frames. */
        int on = 1;
        setsockopt ( infd, SOL_TCP, TCP_CORK, &on, sizeof ( on ) );

        /* Watch the new connection for readability. */
        event.data.fd = infd;
        event.events = EPOLLIN | EPOLLET;
        s = epoll_ctl ( efd, EPOLL_CTL_ADD, infd, &event );
        if ( s == -1 )
        {
            perror ( "epoll_ctl" );
            abort ();
        }

        struct process* process = find_empty_process_for_sock ( infd );
        if ( process == 0 )
        {
            /* BUG FIX: the original dereferenced a possibly-null slot.
             * Unregister and drop the connection instead. */
            epoll_ctl ( efd, EPOLL_CTL_DEL, infd, 0 );
            close ( infd );
            continue;
        }
        current_total_processes++;
        reset_process ( process );
        process->sock = infd;
        process->fd = NO_FILE;   /* no content file opened yet */
        process->status = STATUS_READ_REQUEST_HEADER;
        last = process;
    }

    return last;
}

三个不同状态对应三个不同函数进行处理,我就不全贴了,以read_request为例:

内容版权声明:除非注明,否则皆为本站原创文章。

转载注明出处:http://www.heiqu.com/wyyzwp.html