#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/epoll.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include <vector>
#include <algorithm>
#include <iostream>

typedef std::vector<struct epoll_event> EventList;

// Print the perror() message for the current errno and terminate the process.
#define ERR_EXIT(m) \
    do \
    { \
        perror(m); \
        exit(EXIT_FAILURE); \
    } while (0)

// Single-threaded TCP echo server built on level-triggered epoll.
// Listens on port 5188 and echoes back whatever each client sends.
int main(void)
{
    // A TCP connection is full duplex and can be viewed as two simplex
    // channels, each owned by one endpoint. When the peer calls close() we
    // only receive a FIN; by TCP semantics that merely says the peer closed
    // the channel it owns, so this end cannot distinguish close() from
    // shutdown(SHUT_WR). Reading a socket that has seen a FIN returns 0 once
    // the receive buffer drains ("connection closed"). A first write() may
    // still succeed, but if the peer fully closed, the data triggers an RST,
    // and a second write() (after the RST arrives) raises SIGPIPE, which
    // kills the process by default. Ignoring SIGPIPE turns that write() into
    // a -1/EPIPE failure so the program can detect the closed peer instead
    // of dying.
    signal(SIGPIPE, SIG_IGN); // keep the process alive on write-after-close

    // Ignoring SIGCHLD tells the kernel to reap terminated children itself
    // (Linux-specific behavior). This is a common trick in forking concurrent
    // servers: it avoids accumulating zombie processes without having to
    // wait() on every child.
    signal(SIGCHLD, SIG_IGN);

    // Reserve a spare descriptor so EMFILE on accept() can be handled
    // gracefully (see the EMFILE branch below).
    int idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);

    int listenfd;
    //if ((listenfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0)
    if ((listenfd = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC,
                           IPPROTO_TCP)) < 0)
        ERR_EXIT("socket");

    struct sockaddr_in servaddr;
    memset(&servaddr, 0, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_port = htons(5188);
    servaddr.sin_addr.s_addr = htonl(INADDR_ANY);

    // Allow quick restart while old connections linger in TIME_WAIT.
    int on = 1;
    if (setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0)
        ERR_EXIT("setsockopt");

    if (bind(listenfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0)
        ERR_EXIT("bind");

    if (listen(listenfd, SOMAXCONN) < 0)
        ERR_EXIT("listen");

    // Create the epoll instance and register the listening socket for
    // readability (a pending connection makes listenfd readable).
    int epollfd = epoll_create1(EPOLL_CLOEXEC);
    if (epollfd == -1)
        ERR_EXIT("epoll_create1");

    struct epoll_event event;
    event.data.fd = listenfd;
    event.events = EPOLLIN/* | EPOLLET*/;
    if (epoll_ctl(epollfd, EPOLL_CTL_ADD, listenfd, &event) == -1)
        ERR_EXIT("epoll_ctl");

    // NOTE: `EventList events();` would be the most-vexing-parse (a function
    // declaration); give the buffer an explicit initial size instead.
    EventList events(16); // output buffer for epoll_wait; doubled on demand
    struct sockaddr_in peeraddr;
    socklen_t peerlen;
    int connfd;
    std::vector<int> clients; // fds of currently connected clients
    int nready;

    while (1)
    {
        // Block until at least one registered fd is ready (-1 = no timeout).
        nready = epoll_wait(epollfd, events.data(),
                            static_cast<int>(events.size()), -1);
        if (nready == -1)
        {
            if (errno == EINTR) // interrupted by a signal: just retry
                continue;
            ERR_EXIT("epoll_wait");
        }
        if (nready == 0) // nothing happened (cannot occur with -1 timeout)
            continue;

        // If the buffer came back completely full there may be more ready
        // fds than it can hold; double its capacity for the next round.
        if ((size_t)nready == events.size())
            events.resize(events.size() * 2);

        // Handle every ready event.
        for (int i = 0; i < nready; ++i)
        {
            if (events[i].data.fd == listenfd)
            {
                // Event on the listening socket: a new connection is pending.
                peerlen = sizeof(peeraddr);
                connfd = ::accept4(listenfd, (struct sockaddr*)&peeraddr,
                                   &peerlen, SOCK_NONBLOCK | SOCK_CLOEXEC);
                if (connfd == -1)
                {
                    // Process ran out of file descriptors: free the reserved
                    // fd, accept and immediately close the pending connection
                    // so the client gets an orderly shutdown instead of being
                    // retried forever, then re-reserve the idle fd.
                    if (errno == EMFILE)
                    {
                        close(idlefd);
                        idlefd = accept(listenfd, NULL, NULL);
                        close(idlefd);
                        idlefd = open("/dev/null", O_RDONLY | O_CLOEXEC);
                        continue;
                    }
                    else
                        ERR_EXIT("accept4");
                }

                std::cout << "ip=" << inet_ntoa(peeraddr.sin_addr)
                          << " port=" << ntohs(peeraddr.sin_port) << std::endl;

                clients.push_back(connfd);

                // Watch the new connection for incoming data.
                event.data.fd = connfd;
                event.events = EPOLLIN/* | EPOLLET*/;
                epoll_ctl(epollfd, EPOLL_CTL_ADD, connfd, &event);
            }
            else if (events[i].events & EPOLLIN)
            {
                connfd = events[i].data.fd;
                if (connfd < 0)
                    continue;

                // Leave room for a terminating NUL so printing buf is safe.
                char buf[1024] = {0};
                int ret = read(connfd, buf, sizeof(buf) - 1);
                if (ret == -1)
                {
                    // connfd is non-blocking: a spurious wakeup can yield
                    // EAGAIN, and a signal can yield EINTR — neither is fatal.
                    if (errno == EAGAIN || errno == EWOULDBLOCK ||
                        errno == EINTR)
                        continue;
                    ERR_EXIT("read");
                }
                if (ret == 0)
                {
                    // Peer closed its side: unregister and drop the client.
                    std::cout << "client close" << std::endl;
                    close(connfd);
                    event = events[i];
                    epoll_ctl(epollfd, EPOLL_CTL_DEL, connfd, &event);
                    clients.erase(std::remove(clients.begin(), clients.end(),
                                              connfd),
                                  clients.end());
                    continue;
                }

                std::cout << buf;
                // Echo back exactly the bytes received (strlen would truncate
                // at an embedded NUL).
                write(connfd, buf, ret);
            }
        }
    }

    return 0;
}