I want to compare the performance of different IPC mechanisms between two processes, starting with Unix domain sockets.
I have a basic program that creates a pair of connected sockets with socketpair() and then calls fork(). On each iteration it measures the round-trip time (RTT) of sending 8192 bytes from the parent to the child and back (the buffer contents differ on each iteration).
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    int i, pid, sockpair[2];
    char buf[8192];
    struct timespec tp1, tp2;

    assert(argc == 2);

    // Create a socket pair using Unix domain sockets with reliable,
    // in-order data transmission.
    socketpair(AF_UNIX, SOCK_STREAM, 0, sockpair);

    // We then fork to create a child process and then start the benchmark.
    pid = fork();
    if (pid == 0) {
        // This is the child process.
        for (i = 0; i < atoi(argv[1]); i++) {
            assert(recv(sockpair[1], buf, sizeof(buf), 0) > 0);
            assert(send(sockpair[1], buf, sizeof(buf), 0) > 0);
        }
    } else {
        // This is the parent process.
        for (i = 0; i < atoi(argv[1]); i++) {
            memset(buf, i, sizeof(buf));
            buf[sizeof(buf) - 1] = '\0';
            assert(clock_gettime(CLOCK_REALTIME, &tp1) == 0);
            assert(send(sockpair[0], buf, sizeof(buf), 0) > 0);
            assert(recv(sockpair[0], buf, sizeof(buf), 0) > 0);
            assert(clock_gettime(CLOCK_REALTIME, &tp2) == 0);
            printf("%ld ns\n", tp2.tv_nsec - tp1.tv_nsec);
        }
    }
    return 0;
}
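(One caveat with the timing as written: it subtracts only the tv_nsec fields, so any round trip that straddles a second boundary gives a bogus result, and CLOCK_REALTIME can jump if the wall clock is adjusted mid-run. A small helper along these lines would compute the full difference; elapsed_ns is my own name, not a library function, and CLOCK_MONOTONIC is generally the better clock for interval measurements.)

#include <time.h>

// Full elapsed time in nanoseconds between two timespecs,
// accounting for the seconds field as well as the nanoseconds.
static long long elapsed_ns(const struct timespec *start, const struct timespec *end)
{
    return (long long)(end->tv_sec - start->tv_sec) * 1000000000LL
         + (end->tv_nsec - start->tv_nsec);
}

// Usage in the parent loop (sketch):
//   assert(clock_gettime(CLOCK_MONOTONIC, &tp1) == 0);
//   ... send/recv ...
//   assert(clock_gettime(CLOCK_MONOTONIC, &tp2) == 0);
//   printf("%lld ns\n", elapsed_ns(&tp1, &tp2));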
However, I noticed that on every run of the program, the elapsed time for the first iteration (i = 0) is always an outlier:
79306 ns
18649 ns
19910 ns
19601 ns
...
I wonder whether the kernel performs some one-time setup on the first send() call, for example allocating the 8192 bytes of kernel buffer space that holds the data between the calls to send() and recv()?
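If the suspicion is a one-time setup cost (lazy allocation of socket buffers in the kernel, first-touch page faults on buf, copy-on-write after fork), the usual way to check is an untimed warm-up round trip before the measured loop. A minimal sketch of what each side could do before entering its loop, reusing the sockpair and buf from the program above:

// Sketch: one untimed warm-up round trip so that first-use costs
// (kernel buffer setup, page faults on buf, copy-on-write after fork)
// are paid before any measurement starts.
if (pid == 0) {
    // Child: mirror the parent's warm-up exchange.
    assert(recv(sockpair[1], buf, sizeof(buf), 0) > 0);
    assert(send(sockpair[1], buf, sizeof(buf), 0) > 0);
    // ... then run the timed loop from above ...
} else {
    // Parent: send and receive once without timing it.
    assert(send(sockpair[0], buf, sizeof(buf), 0) > 0);
    assert(recv(sockpair[0], buf, sizeof(buf), 0) > 0);
    // ... then run the timed loop from above ...
}

If the outlier disappears after the warm-up, it was a first-use effect rather than a flaw in the measurement itself. You can also query the socket's send buffer size with getsockopt(sockpair[0], SOL_SOCKET, SO_SNDBUF, ...) to see how much the kernel reserves up front.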