Network oddities [message #31517] | Wed, 02 July 2008 20:47
From: umask (Junior Member, 23 messages, registered December 2007)

Good day!
Here is what we have.
1. OS: CentOS 5 (up2date) on the hardware node:
 cat /etc/redhat-release
 CentOS release 5.2 (Final)
 
2. uname -a (on the hardware node):
 Linux host01.test.ru 2.6.18-53.1.19.el5.028stab053.14 #1 SMP Thu May 8 20:43:27 MSD 2008 i686 i686 i386 GNU/Linux
 
3. VPS 1100 configuration:
 
 ONBOOT="yes"
 
 # UBC parameters (in form of barrier:limit)
 KMEMSIZE="2147483647:2147483647"
 LOCKEDPAGES="2147483647:2147483647"
 PRIVVMPAGES="2147483647:2147483647"
 SHMPAGES="2147483647:2147483647"
 NUMPROC="2147483647:2147483647"
 PHYSPAGES="2147483647:2147483647"
 VMGUARPAGES="2147483647:2147483647"
 OOMGUARPAGES="2147483647:2147483647"
 NUMTCPSOCK="2147483647:2147483647"
 NUMFLOCK="2147483647:2147483647"
 NUMPTY="2147483647:2147483647"
 NUMSIGINFO="2147483647:2147483647"
 TCPSNDBUF="2147483647:2147483647"
 TCPRCVBUF="2147483647:2147483647"
 OTHERSOCKBUF="2147483647:2147483647"
 DGRAMRCVBUF="2147483647:2147483647"
 NUMOTHERSOCK="2147483647:2147483647"
 DCACHESIZE="2147483647:2147483647"
 NUMFILE="2147483647:2147483647"
 AVNUMPROC="180:180"
 NUMIPTENT="2147483647:2147483647"
 
 # Disk quota parameters (in form of softlimit:hardlimit)
 DISKSPACE="100000000:120000000"
 DISKINODES="10000000:15000000"
 QUOTATIME="0"
 
# CPU fair scheduler parameter
 CPUUNITS="500000"
 
 
 IP_ADDRESS="192.168.1.10"
 HOSTNAME="a.host01.test.ru"
 VE_ROOT="/vz/root/$VEID"
 VE_PRIVATE="/vz/private/$VEID"
 OSTEMPLATE="centos-5-i386-minimal"
 ORIGIN_SAMPLE="vps.basic"
 NAMESERVER="192.168.1.1"
 SEARCHDOMAIN="testru"
 CPULIMIT="1000"
 
That is, all the limits are effectively removed.
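The same "unlimited" barrier:limit pairs could also be applied with vzctl instead of editing the config file by hand; a minimal sketch (not the poster's actual commands), using VEID 1100 and a few representative parameters from the config above:

# Apply some of the barrier:limit values shown above and persist them.
vzctl set 1100 --kmemsize 2147483647:2147483647 \
               --privvmpages 2147483647:2147483647 \
               --numtcpsock 2147483647:2147483647 \
               --tcpsndbuf 2147483647:2147483647 \
               --tcprcvbuf 2147483647:2147483647 \
               --save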
 
4. The hardware node is either a P4 with 1 GB RAM or a dual Xeon with 4 GB RAM; the result is the same on both.
 
5. On the hardware node, cat /etc/sysctl.conf:
 # Kernel sysctl configuration file for Red Hat Linux
 #
# For binary values, 0 is disabled, 1 is enabled.  See sysctl(8) and
# sysctl.conf(5) for more details.
 
 # Controls IP packet forwarding
 net.ipv4.ip_forward = 0
 
 # Controls source route verification
 net.ipv4.conf.default.rp_filter = 1
 
 # Do not accept source routing
 net.ipv4.conf.default.accept_source_route = 0
 
 # Controls the System Request debugging functionality of the kernel
 kernel.sysrq = 0
 
 # Controls whether core dumps will append the PID to the core filename
 # Useful for debugging multi-threaded applications
 kernel.core_uses_pid = 1
 
 # Controls the use of TCP syncookies
 net.ipv4.tcp_syncookies = 0
 
 # Controls the maximum size of a message, in bytes
 kernel.msgmnb = 65536
 
# Controls the default maximum size of a message queue
 kernel.msgmax = 65536
 
 # Controls the maximum shared segment size, in bytes
 kernel.shmmax = 4294967295
 
 # Controls the maximum number of shared memory segments, in pages
 kernel.shmall = 268435456
 
 # On Hardware Node we generally need
 # packet forwarding enabled and proxy arp disabled
 net.ipv4.ip_forward = 1
 net.ipv4.conf.default.proxy_arp = 0
 # Enables source route verification
 net.ipv4.conf.all.rp_filter = 1
 # Enables the magic-sysrq key
 kernel.sysrq = 1
# TCP Explicit Congestion Notification
 #net.ipv4.tcp_ecn = 0
 # we do not want all our interfaces to send redirects
 net.ipv4.conf.default.send_redirects = 1
 net.ipv4.conf.all.send_redirects = 0
 
 # Local port range
 net.ipv4.ip_local_port_range = 8192 65535
 
 # Netfilter connection tracking table size
 net.ipv4.ip_conntrack_max = 258068
 
 # For servers that receive many connections at the same time,
 # the TIME-WAIT sockets for new connections can be reused.
 # This is useful in Web servers etc. See also net.ipv4.tcp_tw_recycle.
 net.ipv4.tcp_tw_reuse = 1
 
 # Enable fast recycling of TIME-WAIT sockets status
 net.ipv4.tcp_tw_recycle = 1
 
 # Tune VM subsystem to use swap only as last resort
 vm.swappiness = 1
 
 # Limit of socket listen() backlog, known in userspace as SOMAXCONN.
 # Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
 # for TCP sockets.
 net.core.somaxconn = 2048
 
 # The maximum number of queued connection requests which have still not
 # received an acknowledgement from the  connecting  client.  If this
 # number is exceeded, the kernel will begin dropping requests.
 # The default value of 256 is increased to 1024 when the memory present
 # in the system is adequate or greater (>= 128Mb), and reduced to 128
 # for  those  systems  with very low memory (<= 32Mb).  It is recommended
 # that if this needs to be increased above 1024, TCP_SYNQ_HSIZE in
 # include/net/tcp.h be modified to keep
 # TCP_SYNQ_HSIZE*16<=tcp_max_syn_backlog,  and  the kernel be recompiled.
 net.ipv4.tcp_max_syn_backlog = 1024
 
 # Maximum number of packets in the global input queue.
 # for 1 GBit links recommended value near 3000
 net.core.netdev_max_backlog = 2500
 
 # prevent time wait bucket table overflow
 net.ipv4.tcp_max_tw_buckets_ub = 129034
 net.ipv4.tcp_max_tw_kmem_fraction = 384
 
 # This sets the max OS receive buffer size for all types of connections.
 net.core.rmem_max = 16777216
 
 # This sets the max OS send buffer size for all types of connections.
 net.core.wmem_max = 16777216
 
 # This sets the default OS receive buffer size for all types of connections.
 net.core.rmem_default = 65535
 
 # This sets the default OS send buffer size for all types of connections.
 net.core.wmem_default = 65535
 
 # TCP Autotuning setting. "The tcp_mem variable defines how the TCP stack
 # should behave when it comes to memory usage. ... The first value specified
 # in the tcp_mem variable tells the kernel the low threshold. Below this
 # point, the TCP stack do not bother at all about putting any pressure on the
 # memory usage by different TCP sockets. ... The second value tells the
 # kernel at which point to start pressuring memory usage down. ... The final
 # value tells the kernel how many memory pages it may use maximally.
 # If this value is reached, TCP streams and packets start getting dropped
 # until we reach a lower memory usage again. This value includes all
 # TCP sockets currently in use."
 net.ipv4.tcp_mem = 16777216 16777216 16777216
 
 
 # TCP Autotuning setting. "The first value tells the kernel the minimum
 # receive buffer for each TCP connection, and this buffer is always allocated
 # to a TCP socket, even under high pressure on the system. ... The second
 # value specified tells the kernel the default receive buffer allocated for
 # each TCP socket. This value overrides the /proc/sys/net/core/rmem_default
 # value used by other protocols. ... The third and last value specified in
 # this variable specifies the maximum receive buffer that can be allocated
 # for a TCP socket."
 net.ipv4.tcp_rmem = 4096 131072 16777216
 
 # TCP Autotuning setting. "This variable takes 3 different values which holds
 # information on how much TCP sendbuffer memory space each TCP socket has to
 # use. Every TCP socket has this much buffer space to use before the buffer
 # is filled up. Each of the three values are used under different conditions.
 # ... The first value in this variable tells the minimum TCP send buffer
 # space available for a single TCP socket. ... The second value in the variable
 # tells us the default buffer space allowed for a single TCP socket to use.
 # ... The third value tells the kernel the maximum TCP send buffer space."
 net.ipv4.tcp_wmem = 4096 131072 16777216
 
# This will ensure that immediately subsequent connections use these values.
 net.ipv4.route.flush=1
 
 # RFC 2018 TCP Selective Acknowledgements
 net.ipv4.tcp_sack = 0
 
 # RFC 1323 TCP timestamps
 net.ipv4.tcp_timestamps = 0
 
 net.ipv4.tcp_sack = 1
 net.ipv4.tcp_fack = 1
 
 
 # Enable TCP behaviour conformant with RFC 1337.  When disabled,
 # if a RST is received in TIME_WAIT state,  we close the socket
 # immediately without waiting for the end of the TIME_WAIT period.
 net.ipv4.tcp_rfc1337 = 1
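For completeness, a minimal sketch (an assumption, not from the original post) of how such settings are reloaded and spot-checked after editing the file:

# Reload /etc/sysctl.conf and verify a few of the values listed above.
sysctl -p /etc/sysctl.conf
sysctl net.ipv4.tcp_max_syn_backlog net.core.somaxconn net.ipv4.ip_conntrack_max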
 
6. iptables is disabled both on the hardware node and in the VPS (chkconfig iptables off).
 
7. The VPS runs nginx 0.6.31.
cat /etc/nginx/nginx.conf:
 
 
user  nginx nginx;
worker_processes  4;
worker_rlimit_nofile    16384;
error_log  /var/log/nginx/error.log debug;
events {
	worker_connections  16384;
	use epoll;
}
http {
	include       /etc/nginx/mime.types;
	default_type  text/plain;
	log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
			  '"$status" $body_bytes_sent "$http_referer" '
			  '"$http_user_agent" "$http_x_forwarded_for" '
			  '$request_time "$upstream_addr" [$upstream_response_time]';
	log_format compat '$remote_addr - $remote_user [$time_local] "$request" '
			  '"$status" $body_bytes_sent "$http_referer" '
			  '"$http_user_agent" "$http_x_forwarded_for"';
	sendfile       on;
	tcp_nopush     on;
	tcp_nodelay    on;
	client_header_timeout  60;
	client_body_timeout    60;
	send_timeout           30;
	keepalive_timeout  0;
	reset_timedout_connection  on;
        server {
                listen 80 default backlog=16384 rcvbuf=4096 sndbuf=4096 deferred;
                server_name .test.ru;
                error_log  /var/log/nginx/test.ru_error.log debug;
                access_log /var/log/nginx/test.ru_access_main.log main;
    		
	        location /nginx_status {
        	        stub_status on;
	                access_log   off;
	        }
                fastcgi_intercept_errors on;
                proxy_intercept_errors on;
                error_page   500 502 503 504  /50x_empty.html;
                error_page   400 401 402 403 404 405  /50x_empty.html;
                location = /50x_empty.html {
                        root   /home/nginx/htdocs;
                }
                location ~ \.(wml|php)$ {
                        proxy_read_timeout    3;
                        proxy_connect_timeout 3;
                        proxy_pass   http://127.0.0.1:8080;
                        proxy_set_header   X-Real-IP        $remote_addr;
                        proxy_set_header   X-Forwarded-For  $proxy_add_x_forwarded_for;
                        proxy_set_header   Host             $host;
                }
        }
}
Tomcat and Apache httpd 2.2.8/2.2.9 were also tried in place of nginx.
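A minimal sketch (not from the original post) of validating this config and querying the stub_status endpoint it exposes; the URL matches the one used by the test script below:

# Check the configuration syntax and fetch the status page used by dummy.php.
nginx -t -c /etc/nginx/nginx.conf
curl -s http://192.168.1.10/nginx_status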
 
8. sysctl settings inside the VPS are at their defaults.
 
9. The following PHP script, dummy.php (the same was also tested in Java):
<?php
// Request /nginx_status 1000 times and report the slowest single request.
$max = 0;
for ($i = 0; $i < 1000; $i++) {
    $t = microtime(true);
    file_get_contents("http://192.168.1.10/nginx_status");
    $t = microtime(true) - $t;
    if ($t > $max) {
        $max = $t;
    }
}
echo $max . "\n";
?>
 
10. A separate note: DNS is not used anywhere (verified with tcpdump many times).
 
11. dummy.php is run like this:
 while : ; do php dummy.php ; done | grep -e "[1-9]\.[0-9]"
so that we can see when a response took longer than one second.
 
Step 11 produces the following sad picture:
 3.0010089874268
 3.0013828277588
 3.001168012619
 3.0015661716461
 3.0009059906006
 3.0006580352783
 3.0018539428711
 3.0014488697052
 3.0009009838104
 3.0018038749695
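A hypothetical variant of the loop from step 11 (not part of the original test) that separates TCP connect time from total time with curl; if the slow samples show roughly 3 s in the connect column, the stall happens while the connection is being established rather than while nginx produces the reply. The URL is the same /nginx_status endpoint used by dummy.php:

# Print connect time and total time for each request, keeping only slow ones.
while :; do
    curl -s -o /dev/null -w '%{time_connect} %{time_total}\n' \
         http://192.168.1.10/nginx_status
done | awk '$2 > 1'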
 
 
Now, if we run nginx/tomcat/httpd on the hardware node instead, then (attention!) the problem is not observed!
 
Just in case:
 
 
cat /proc/user_beancounters 
Version: 2.5
       uid  resource           held    maxheld    barrier      limit    failcnt
     1100:  kmemsize        4334631    9382841 2147483647 2147483647          0
            lockedpages           0          0 2147483647 2147483647          0
            privvmpages      245588     246632 2147483647 2147483647          0
            shmpages              1          1 2147483647 2147483647          0
            dummy                 0          0          0          0          0
            numproc             118        122 2147483647 2147483647          0
            physpages         34130      34505 2147483647 2147483647          0
            vmguarpages           0          0 2147483647 2147483647          0
            oomguarpages      34130      34505 2147483647 2147483647          0
            numtcpsock           14         16 2147483647 2147483647          0
            numflock              1          2 2147483647 2147483647          0
            numpty                0          1 2147483647 2147483647          0
            numsiginfo            0          2 2147483647 2147483647          0
            tcpsndbuf        125216     125216 2147483647 2147483647          0
            tcprcvbuf        229376     229376 2147483647 2147483647          0
            othersockbuf      11180      13416 2147483647 2147483647          0
            dgramrcvbuf           0          0 2147483647 2147483647          0
            numothersock         11         13 2147483647 2147483647          0
            dcachesize            0          0 2147483647 2147483647          0
            numfile            2082       2158 2147483647 2147483647          0
            dummy                 0          0          0          0          0
            dummy                 0          0          0          0          0
            dummy                 0          0          0          0          0
            numiptent            10         10 2147483647 2147483647          0
        0:  kmemsize        3298695   16916897 2147483647 2147483647          0
            lockedpages        1083       1083 2147483647 2147483647          0
            privvmpages       12459      14060 2147483647 2147483647          0
            shmpages            656        672 2147483647 2147483647          0
            dummy                 0          0 2147483647 2147483647          0
            numproc              73         85 2147483647 2147483647          0
            physpages          4553       4994 2147483647 2147483647          0
            vmguarpages           0          0 2147483647 2147483647          0
            oomguarpages       4553       4994 2147483647 2147483647          0
            numtcpsock            3          3 2147483647 2147483647          0
            numflock              4          5 2147483647 2147483647          0
            numpty                1          1 2147483647 2147483647          0
            numsiginfo            0          2 2147483647 2147483647          0
            tcpsndbuf         35724      35724 2147483647 2147483647          0
            tcprcvbuf         49152      32768 2147483647 2147483647          0
            othersockbuf     154284     161420 2147483647 2147483647          0
            dgramrcvbuf           0       8380 2147483647 2147483647          0
            numothersock        122        126 2147483647 2147483647          0
            dcachesize            0          0 2147483647 2147483647          0
            numfile            1531       1771 2147483647 2147483647          0
            dummy                 0          0 2147483647 2147483647          0
            dummy                 0          0 2147483647 2147483647          0
            dummy                 0          0 2147483647 2147483647          0
            numiptent            10         10 2147483647 2147483647          0
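A minimal sketch (not from the original post) for keeping an eye on the failcnt column during the test; any non-zero value in the last column would mean a UBC limit was actually hit, which the listing above rules out so far:

# Print only beancounter lines whose failcnt (the last column) is non-zero.
awk 'NR > 2 && $NF > 0' /proc/user_beancounters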
 
I have tried tweaking the sysctl parameters every way I could think of, and have rebooted both test servers more than once; /var/log/messages and dmesg are empty, with nothing that would point to the problem.
 
I have already spent almost a full day hunting for the cause.
 
Please suggest where to dig...

Re: Network oddities [message #31549 is a reply to message #31528] | Thu, 03 July 2008 13:37
From: maratrus (Senior Member, 1495 messages, registered August 2007, Moscow)

Hello,
I cannot offer solid advice yet, but I would like to make a few remarks:
 
1. Do I understand correctly that from the HN the response time never exceeds one second (could you post the test output both from the VE and from the HN)?
2. It would be good to pin down the subsystem where the problem is suspected (it could be the network that is slow, or it could be the nginx process inside the VE).
So it would help to have two simple tests, one for the network and one for the CPU.
3. In addition to the second point: as far as I understand, CPULIMIT="1000" is fine, since it is an absolute parameter.
CPUUNITS="500000" is less transparent because it is relative, so please show the output of vzcpucheck from the HN so we can see the power of the HN; if necessary, we can then play with this parameter (a sketch of the relevant commands is below).
Also, how many VEs are running on the HN?
Thanks.
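A minimal sketch (not part of the original exchange) of the commands in question; VEID 1100 and the cpuunits values come from the config quoted earlier:

vzcpucheck                              # reports current utilization and the power of the node
vzctl set 1100 --cpuunits 1000 --save   # e.g. drop the VE back to the default relative weight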

Re: Network oddities [message #31550 is a reply to message #31549] | Thu, 03 July 2008 13:59
From: umask (Junior Member, 23 messages, registered December 2007)

1. From the HN the response time is always normal, under 0.1 s.
2. I think the culprit is the part of the networking subsystem that the OpenVZ patch modifies. The network itself is not slow. Nginx is not slow either, and CPU usage never exceeds 10%. Besides nginx we also tried tomcat and httpd for serving static content.
 
> So it would help to have two simple tests, one for the network and one for the CPU.
 
We run httperf against the nginx installed on the hardware node:
 
 
./httperf --server 192.168.1.1 --port 80 --uri=/index.html --num-conns 1000000 --rate 15000 --timeout 0.1
httperf --timeout=0.1 --client=0/1 --server=192.168.1.1 --port=80 --uri=/index.html --rate=15000 --send-buffer=4096 --recv-buffer=16384 --num-conns=1000000 --num-calls=1
Maximum connect burst length: 30
Total: connections 1000000 requests 1000000 replies 1000000 test-duration 66.760 s
Connection rate: 14979.0 conn/s (0.1 ms/conn, <=269 concurrent connections)
Connection time [ms]: min 0.5 avg 3.1 max 17.8 median 2.5 stddev 1.9
Connection time [ms]: connect 0.4
Connection length [replies/conn]: 1.000
Request rate: 14979.0 req/s (0.1 ms/req)
Request size [B]: 75.0
Reply rate [replies/s]: min 14972.3 avg 14979.1 max 14987.6 stddev 4.9 (13 samples)
Reply time [ms]: response 2.7 transfer 0.0
Reply size [B]: header 209.0 content 4.0 footer 0.0 (total 213.0)
Reply status: 1xx=0 2xx=1000000 3xx=0 4xx=0 5xx=0
CPU time [s]: user 14.09 system 51.83 (user 21.1% system 77.6% total 98.7%)
Net I/O: 4212.8 KB/s (34.5*10^6 bps)
Errors: total 0 client-timo 0 socket-timo 0 connrefused 0 connreset 0
Errors: fd-unavail 0 addrunavail 0 ftab-full 0 other 0
3. About CPUUNITS="500000", man vzctl says:
 
 
       --cpuunits num
           CPU weight for a VE. Argument is positive non-zero number, which passed to and used in kernel fair scheduler.
           The  larger  the  number  is, the more CPU time this VE get. Maximum value is 500000, minimal is 8. Number is
           relative to weights of all the other running VEs. If cpuunits not specified default value 1000 is used.
           You can set CPU weight for VE0 (hardware node itself) as well (use vzctl  set  0  --cpuunits  num).  Usually,
           OpenVZ initscript (/etc/init.d/vz) takes care of setting this.
That is why it is half a million; you simply cannot set it any higher anyway.
Initially the problem was present even when CPULIMIT and CPUUNITS were at their default values.
 
Exactly one VPS is running on the HN.
There is no load on either the HN or the VPS apart from what is described above.
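A minimal sketch (not from the original post) of how the "exactly one VPS, no extra load" claim can be double-checked from the HN:

vzlist -a               # all containers and their state
vzctl exec 1100 uptime  # load average inside the VE
uptime                  # load average on the HN itself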

Re: Network oddities [message #31564 is a reply to message #31561] | Thu, 03 July 2008 15:03
From: umask (Junior Member, 23 messages, registered December 2007)

> And, it is not required at all, but would you like to try doing the same thing with veth?

Could that change anything significantly?
I have never worked with veth, only with venet (the default).

Are the principles very different?
Will ping (RTT) be noticeably lower than with venet?
 
> If you run your PHP script from the HN, is the result the same?
 
Testing that right now. I will leave the load running for a couple of hours to be sure. But from the first 10 minutes it is already clear that the problem shows up on requests from outside, while requests from the HN are all fine.
 
By the way, about httperf: if the load is directed at the hardware node, everything is fine. If a similar load (as above) is directed at the VPS, a pile of timeouts appears immediately.
dmesg and /var/log/messages are clean everywhere.

Re: Network oddities [message #31566 is a reply to message #31564] | Thu, 03 July 2008 15:26
From: maratrus (Senior Member, 1495 messages, registered August 2007, Moscow)

> Could that change anything significantly?

I asked mostly out of curiosity; of course, I would like everything to change for the better.
 
 
> Are the principles very different?

If you mean the configuration, not particularly.
This page seems to describe everything well: http://wiki.openvz.org/Veth
In short, you need to create the device, add an IP to it inside the VE, set a few sysctl parameters, and add routes to the VE (with venet, vzctl does all of this automatically); a sketch of these steps is below.
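A minimal sketch of the veth steps described above, based on the linked wiki page rather than on commands from this thread; VEID 1100 and the address 192.168.1.10 are taken from the config quoted earlier, the /24 netmask and the host-side interface name veth1100.0 are assumptions:

# On the HN: create the veth pair for the container and persist it.
vzctl set 1100 --netif_add eth0 --save
# Inside the VE: assign the IP and bring the interface up.
vzctl exec 1100 ip addr add 192.168.1.10/24 dev eth0
vzctl exec 1100 ip link set eth0 up
# On the HN: allow forwarding and proxy ARP on the host-side veth device.
echo 1 > /proc/sys/net/ipv4/conf/veth1100.0/forwarding
echo 1 > /proc/sys/net/ipv4/conf/veth1100.0/proxy_arp
# On the HN: route traffic for the VE's IP through the veth device.
ip route add 192.168.1.10 dev veth1100.0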
 
 
> But from the first 10 minutes it is already clear that the problem shows up on requests from outside, while requests from the HN are all fine.

Are there any limits on traffic bandwidth on the HN side?
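A minimal sketch (an assumption, not part of the original exchange) of how such a bandwidth limit could be spotted on the HN; eth0 and venet0 are the interfaces mentioned elsewhere in the thread:

# Look for traffic-shaping rules that could throttle the VE's traffic.
tc qdisc show dev eth0
tc qdisc show dev venet0
tc class show dev eth0
# Anything beyond the default pfifo_fast qdisc would deserve a closer look.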

Re: Network oddities [message #31660 is a reply to message #31620] | Fri, 04 July 2008 20:34
From: umask (Junior Member, 23 messages, registered December 2007)

The addresses earlier in the thread were made up. Now I will reveal the real ones.
(1) 217.23.140.131 - IP of the HN from which the test was run.
(2) 217.23.140.138 - IP of the hardware node on which the VPS with nginx is running.
(3) 212.158.166.207 - IP of the VPS where nginx runs.
 
 *1*
netstat -s on (1):
 
 
Ip:
    3613925 total packets received
    4 with invalid headers
    134 forwarded
    0 incoming packets discarded
    3613787 incoming packets delivered
    3613790 requests sent out
Icmp:
    2 ICMP messages received
    0 input ICMP message failed.
    ICMP input histogram:
        echo requests: 2
    9 ICMP messages sent
    0 ICMP messages failed
    ICMP output histogram:
        destination unreachable: 3
        time exceeded: 4
        echo replies: 2
Tcp:
    602193 active connections openings
    6 passive connection openings
    1 failed connection attempts
    1 connection resets received
    2 connections established
    3613655 segments received
    3613536 segments send out
    28 segments retransmited
    0 bad segments received.
    10 resets sent
Udp:
    82 packets received
    3 packets to unknown port received.
    0 packet receive errors
    83 packets sent
TcpExt:
    1 resets received for embryonic SYN_RECV sockets
    3 TCP sockets finished time wait in fast timer
    7 delayed acks sent
    81 packets directly queued to recvmsg prequeue.
    24 packets directly received from prequeue
    602278 packets header predicted
    1204446 acknowledgments not containing data received
    602423 predicted acknowledgments
    1 times recovered from packet loss due to SACK data
    10 congestion windows recovered after partial ack
    1 TCP data loss events
    1 fast retransmits
    27 other TCP timeouts
    1 DSACKs sent for old packets
    2 DSACKs received
    1 connections reset due to unexpected data
    1 connections reset due to early user close
netstat -s on (2):
 
 
Ip:
    7227192 total packets received
    7226305 forwarded
    0 incoming packets discarded
    854 incoming packets delivered
    7226920 requests sent out
Icmp:
    9 ICMP messages received
    0 input ICMP message failed.
    ICMP input histogram:
        echo requests: 9
    9 ICMP messages sent
    0 ICMP messages failed
    ICMP output histogram:
        echo replies: 9
Tcp:
    9 active connections openings
    9 passive connection openings
    0 failed connection attempts
    0 connection resets received
    3 connections established
    798 segments received
    548 segments send out
    9 segments retransmited
    0 bad segments received.
    0 resets sent
Udp:
    77 packets received
    0 packets to unknown port received.
    0 packet receive errors
    79 packets sent
TcpExt:
    10 TCP sockets finished time wait in fast timer
    601948 TCP sockets finished time wait in slow timer
    12 delayed acks sent
    65 packets directly queued to recvmsg prequeue.
    17 packets directly received from prequeue
    179 packets header predicted
    59 acknowledgments not containing data received
    347 predicted acknowledgments
    4 congestion windows recovered after partial ack
    0 TCP data loss events
    8 other TCP timeouts
    2 DSACKs sent for old packets
    5 DSACKs received
netstat -s on (3):
 
 
Ip:
    3613147 total packets received
    0 forwarded
    0 incoming packets discarded
    3613147 incoming packets delivered
    3613158 requests sent out
Icmp:
    4 ICMP messages received
    0 input ICMP message failed.
    ICMP input histogram:
        timeout in transit: 4
    4 ICMP messages sent
    0 ICMP messages failed
    ICMP output histogram:
        destination unreachable: 4
Tcp:
    9 active connections openings
    602198 passive connection openings
    4 failed connection attempts
    0 connection resets received
    0 connections established
    3613169 segments received
    3613184 segments send out
    0 segments retransmited
    0 bad segments received.
    0 resets sent
Udp:
    0 packets received
    4 packets to unknown port received.
    0 packet receive errors
    0 packets sent
TcpExt:
    1 delayed acks sent
    602191 packets header predicted
    602191 acknowledgments not containing data received
    0 TCP data loss events
 *2*
ethtool -S eth0 on (1):
 
 
NIC statistics:
     rx_packets: 3615960
     tx_packets: 3613901
     rx_bytes: 411792822
     tx_bytes: 289840057
     rx_errors: 0
     tx_errors: 0
     tx_dropped: 0
     multicast: 2
     collisions: 0
     rx_length_errors: 0
     rx_over_errors: 0
     rx_crc_errors: 0
     rx_frame_errors: 0
     rx_no_buffer_count: 0
     rx_missed_errors: 0
     tx_aborted_errors: 0
     tx_carrier_errors: 0
     tx_fifo_errors: 0
     tx_heartbeat_errors: 0
     tx_window_errors: 0
     tx_abort_late_coll: 0
     tx_deferred_ok: 0
     tx_single_coll_ok: 0
     tx_multi_coll_ok: 0
     tx_timeout_count: 0
     rx_long_length_errors: 0
     rx_short_length_errors: 0
     rx_align_errors: 0
     tx_tcp_seg_good: 20
     tx_tcp_seg_failed: 0
     rx_flow_control_xon: 0
     rx_flow_control_xoff: 0
     tx_flow_control_xon: 0
     tx_flow_control_xoff: 0
     rx_long_byte_count: 411792822
     rx_csum_offload_good: 3613986
     rx_csum_offload_errors: 0
     rx_header_split: 0
     alloc_rx_buff_failed: 0
ethtool -S eth0 on (2):
 
 
NIC statistics:
     rx_packets: 3616218
     tx_packets: 3613918
     rx_bytes: 290005297
     tx_bytes: 411677781
     rx_broadcast: 2006
     tx_broadcast: 7
     rx_multicast: 2
     tx_multicast: 6
     rx_errors: 0
     tx_errors: 0
     tx_dropped: 0
     multicast: 2
     collisions: 0
     rx_length_errors: 0
     rx_over_errors: 0
     rx_crc_errors: 0
     rx_frame_errors: 0
     rx_no_buffer_count: 0
     rx_missed_errors: 0
     tx_aborted_errors: 0
     tx_carrier_errors: 0
     tx_fifo_errors: 0
     tx_heartbeat_errors: 0
     tx_window_errors: 0
     tx_abort_late_coll: 0
     tx_deferred_ok: 0
     tx_single_coll_ok: 0
     tx_multi_coll_ok: 0
     tx_timeout_count: 0
     tx_restart_queue: 0
     rx_long_length_errors: 0
     rx_short_length_errors: 0
     rx_align_errors: 0
     tx_tcp_seg_good: 1
     tx_tcp_seg_failed: 0
     rx_flow_control_xon: 0
     rx_flow_control_xoff: 0
     tx_flow_control_xon: 0
     tx_flow_control_xoff: 0
     rx_long_byte_count: 290005297
     rx_csum_offload_good: 3614184
     rx_csum_offload_errors: 0
     rx_header_split: 0
     alloc_rx_buff_failed: 0
     tx_smbus: 0
     rx_smbus: 0
     dropped_smbus: 0
 *3*
 while : ; do php dummy.php ; echo ; done | grep -e "[1-9]\.[0-9]"
 3.0010950565338
 3.0016250610352
 3.0018901824951
 3.00084400177
 3.0018458366394
 3.0011219978333
 3.0014960765839
 3.001119852066
 
 
That is, delays were observed only 8 times, over roughly 10-15 minutes of testing.
 
http://people.mobiledirect.ru/people/umask/public/ovz_problems/
 
HN_where_test_running.out[.gz] - (1)
problem_HN_eth0.out[.gz] - (2), on eth0
problem_HN_venet0.out[.gz] - (2), on venet0
problem_VPS_venet0.out[.gz] - (3), on venet0
 
tcpdump was run like this: tcpdump -n -i <interface> > /tmp/file.out 2>&1.
 
The netstat and ethtool readings were taken after the test finished.
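A minimal sketch (an assumption about the text format written by the tcpdump command above, not part of the original post) for spotting retransmitted SYNs in the captures; a client port whose SYN appears more than once would match the roughly 3 s delays seen in the test:

# Count SYN lines per source address.port; sources listed here sent more than one SYN.
grep ': S ' /tmp/file.out | awk '{print $3}' | sort | uniq -c | awk '$1 > 1' | head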
[Updated on: Fri, 04 July 2008 20:36]

Re: Network oddities [message #47193 is a reply to message #31517] | Fri, 20 July 2012 08:03
From: hiddenman (Junior Member, 1 message, registered July 2012)

Greetings, colleagues.
Thanks for the testing. It looks like we have exactly the same problem; at least the symptoms are similar.

We are now starting a test comparing response times on the HN and in the CT; we will check.
 
 