From 17e7699007e6140c20478c095977af8cf26fa5b2 Mon Sep 17 00:00:00 2001
From: 刘代晓
Date: Fri, 27 Oct 2023 16:43:16 +0800
Subject: [PATCH] Site updated: 2023-10-27 16:42:18
---
 509 files changed, 250777 insertions(+)
 [diffstat: 509 generated site files under 2019/-2023/, archives/, tags/, page/, css/, js/, images/ and lib/, plus sitemaps and CNAME; the old placeholder file is deleted]

diff --git a/2019/09/12/golang-lock/index.html b/2019/09/12/golang-lock/index.html
new file mode 100644
index 0000000000..9c5fd9cbaf
--- /dev/null
+++ b/2019/09/12/golang-lock/index.html

golang_gc related issues
golang gc closes the fd

Around April or May I wrote a Go program. To guarantee that at most one instance is ever running, the process acquires a lock at startup, and any process that fails to take the file lock exits. The process is launched once a minute, purely to keep it alive.

I used a file lock precisely because of this property:

  • if the file is closed, the lock is released along with it

The problem

  • The problem: after half a day, checking the running processes with ps aux showed 7-8 of them, when by design there should only be one.
    The code looked roughly like this:

func lockFile() error {
	name := "lockfiletest.lock"
	file, err := os.OpenFile(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666) // ① open the file
	...
	err = syscall.FcntlFlock(file.Fd(), syscall.F_SETLK, &flockT) // ② take the lock
	...
}

func main() {
	err := lockFile()
	if err != nil {
		os.Exit(2) // ③ exit if the lock could not be taken
	}
}

Very simple logic: take the file lock, and exit if that fails.

Finding the problem

  • So where does it go wrong?

I thought about it for a very long time: did one of the libraries I use fork a process somewhere? Did some third-party package close the file?

For a long while I kept suspecting a third-party package, but after a lot of googling I finally pinned it on the GC.

Related links

In the example below, once compiled, the file is reclaimed as soon as runtime.GC() is executed manually.

package main

import (
	"log"
	"os"
	"runtime"
	"time"
)

func openFile(path string) error {
	_, err := os.Open(path)
	return err
}

func main() {
	if err := openFile(os.Args[1]); err != nil {
		log.Fatal(err)
	}
	// triggering GC here also reclaims the unreferenced fd opened above
	runtime.GC()
	time.Sleep(time.Hour)
}

  • How do you see which files a process has open?

    Just use the proc filesystem: procfs exports almost every statistic the Linux kernel keeps.

## 8808 is the pid of my nginx master process
ll /proc/8808/fd/
total 0
dr-x------ 2 root root 0 8月 10 06:16 ./
dr-xr-xr-x 9 root root 0 8月 9 07:35 ../
lrwx------ 1 root root 64 8月 10 06:16 0 -> /dev/null
lrwx------ 1 root root 64 8月 10 06:16 1 -> /dev/null
l-wx------ 1 root root 64 8月 10 06:16 2 -> /usr/local/nginx/logs/error.log*
lrwx------ 1 root root 64 8月 10 06:16 3 -> socket:[78178946]
l-wx------ 1 root root 64 8月 10 06:16 4 -> /usr/local/nginx/logs/access.log*
l-wx------ 1 root root 64 8月 10 06:16 5 -> /usr/local/nginx/logs/error.log*
lrwx------ 1 root root 64 8月 10 06:16 6 -> socket:[78180730]
lrwx------ 1 root root 64 8月 10 06:16 7 -> socket:[78178947]

How to fix it

First: what exactly is our problem?
It is actually quite simple:

  • our fd object (the *os.File) becomes unreferenced and is collected
  • during collection the GC runs the fd object's callback, i.e. its finalizer (see the sketch after this list)
  • that callback closes the file descriptor behind the fd object
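The callback in question is just a finalizer that the os package registers on every *os.File it returns. A minimal sketch of the mechanism, using a hypothetical resource type as a stand-in for os.File (this is not the actual os package code):

package main

import (
	"fmt"
	"runtime"
	"time"
)

// resource stands in for os.File; the real os package registers a finalizer
// that closes the underlying descriptor once the *File becomes unreachable.
type resource struct{ fd int }

func open() *resource {
	r := &resource{fd: 3}
	runtime.SetFinalizer(r, func(r *resource) {
		fmt.Println("finalizer: closing fd", r.fd) // a real *os.File closes the descriptor here
	})
	return r
}

func main() {
	open()                  // the returned value is dropped, so the resource is unreachable
	runtime.GC()            // the next GC cycle queues the finalizer
	time.Sleep(time.Second) // give the finalizer goroutine a moment to run and print
}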

The fix:

Make the file handle a package-level (global) variable. A global is always referenced, so the GC never collects it and its finalizer never runs.
var file *os.File // added this line: the handle is now a package-level variable

func lockFile() error {
	name := "lockfiletest.lock"
	var err error
	file, err = os.OpenFile(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666) // ① open the file (plain assignment, not :=, so the global really is set)
	...
	err = syscall.FcntlFlock(file.Fd(), syscall.F_SETLK, &flockT) // ② take the lock
	...
}
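An alternative sketch (my addition, not from the original post): return the *os.File to the caller and pin it with runtime.KeepAlive after the long-running work. The file stays referenced for the whole lifetime of the process, so the finalizer can never fire early; the Flock_t setup below spells out what the original snippet elides.

package main

import (
	"os"
	"runtime"
	"syscall"
	"time"
)

func lockFile(name string) (*os.File, error) {
	file, err := os.OpenFile(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666)
	if err != nil {
		return nil, err
	}
	flockT := syscall.Flock_t{Type: syscall.F_WRLCK} // exclusive write lock over the whole file
	if err := syscall.FcntlFlock(file.Fd(), syscall.F_SETLK, &flockT); err != nil {
		file.Close()
		return nil, err
	}
	return file, nil
}

func main() {
	file, err := lockFile("lockfiletest.lock")
	if err != nil {
		os.Exit(2)
	}
	time.Sleep(time.Hour)   // stand-in for the real long-running work
	runtime.KeepAlive(file) // file is still referenced here, so no early finalization
}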

diff --git a/2019/09/12/php-imply-cast/index.html b/2019/09/12/php-imply-cast/index.html
new file mode 100644
index 0000000000..0da9efb835
--- /dev/null
+++ b/2019/09/12/php-imply-cast/index.html

The big PHP pitfall: implicit conversion

The PHP pitfall of implicit conversion

PHP is a weakly typed language, and one of its traps is implicit type conversion.

When is an implicit conversion triggered?

The only case I know of is comparison: if the operands differ, the corresponding implicit conversion is applied.

Example

Here is an example.

PHP code:

<?php
var_dump('1abc' == 1);
// returns true

After _is_numeric_string_ex has done its conversion, '1abc' has become 1:

(gdb) p *lval
$4 = 1

The call stack looks like this:
(gdb) bt
#0 _is_numeric_string_ex (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1, oflow_info=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:3001
#1 0x0000000000938c52 in is_numeric_string_ex (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1, oflow_info=0x0)
at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:142
#2 0x0000000000938c94 in is_numeric_string (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1)
at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:146
#3 0x000000000094502b in compare_function (result=0x7fffffff9b78, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:2069
#4 0x0000000000945a32 in is_equal_function (result=0x7fffffff9b78, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:2159
#5 0x00000000009274e3 in zend_try_ct_eval_binary_op (result=0x7fffffff9b78, opcode=17, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:6880
#6 0x0000000000927a0d in zend_compile_binary_op (result=0x7fffffff9b70, ast=0x7fffef686090) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:6999
#7 0x000000000092b8d1 in zend_compile_expr (result=0x7fffffff9b70, ast=0x7fffef686090) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8235
#8 0x000000000091b84f in zend_compile_args (ast=0x7fffef6860a8, fbc=0x167f050) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:3202
#9 0x000000000091baaf in zend_compile_call_common (result=0x7fffffff9d20, args_ast=0x7fffef6860a8, fbc=0x167f050) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:3282
#10 0x000000000091e44b in zend_compile_call (result=0x7fffffff9d20, ast=0x7fffef6860d8, type=0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:4009
#11 0x000000000092bc3e in zend_compile_var (result=0x7fffffff9d20, ast=0x7fffef6860d8, type=0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8339
#12 0x000000000092b841 in zend_compile_expr (result=0x7fffffff9d20, ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8217
#13 0x000000000092b513 in zend_compile_stmt (ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8186
#14 0x000000000092b0de in zend_compile_top_stmt (ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8072
#15 0x000000000092b0c0 in zend_compile_top_stmt (ast=0x7fffef686018) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8067
#16 0x00000000008ec355 in zend_compile (type=2) at Zend/zend_language_scanner.l:601
#17 0x00000000008ec4e6 in compile_file (file_handle=0x7fffffffca10, type=8) at Zend/zend_language_scanner.l:635
#18 0x00000000007296f0 in phar_compile_file (file_handle=0x7fffffffca10, type=8) at /home/dinosaur/Downloads/php-7.2.2/ext/phar/phar.c:3320
#19 0x00007fffeeeca612 in opcache_compile_file (file_handle=0x7fffffffca10, type=8, key=0x7fffef16dd4c <accel_globals+556> "test.php:240416:240464", op_array_p=0x7fffffffa318)
at /home/dinosaur/Downloads/php-7.2.2/ext/opcache/ZendAccelerator.c:1600
#20 0x00007fffeeecb722 in persistent_compile_file (file_handle=0x7fffffffca10, type=8) at /home/dinosaur/Downloads/php-7.2.2/ext/opcache/ZendAccelerator.c:1941
#21 0x000000000094ccb4 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1490
#22 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
#23 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
#24 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404


So what rule is being applied here?
If one operand is a string and the other is a number, PHP converts the string to a number before comparing. For a leading-numeric string such as '1abc' the numeric prefix is used (so it becomes 1); a string with no numeric prefix at all becomes 0. That is why '1abc' == 1 ends up being true here.

The rule

Operand 1: string, resource or number
Operand 2: string, resource or number
Rule: translate the strings and resources to numbers, then compare them as ordinary math
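To make the conversion concrete, here is a small illustrative sketch of the leading-digit scan that _is_numeric_string_ex effectively performed on '1abc' above. It is written in Go purely as an analogy; it is not PHP's actual zend_operators code, and leadingInt is a made-up helper name.

package main

import (
	"fmt"
	"unicode"
)

// leadingInt scans the leading digits of s and returns them as an integer,
// roughly in the spirit of PHP's leading-numeric string conversion.
func leadingInt(s string) (int64, bool) {
	var n int64
	seen := false
	for _, r := range s {
		if !unicode.IsDigit(r) {
			break
		}
		n = n*10 + int64(r-'0')
		seen = true
	}
	return n, seen
}

func main() {
	v, ok := leadingInt("1abc")
	fmt.Println(v, ok) // 1 true, so '1abc' == 1 compares equal in PHP 7
}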
Related reading
diff --git a/2019/09/13/hello-world/index.html b/2019/09/13/hello-world/index.html
new file mode 100644
index 0000000000..c35544d7ba
--- /dev/null
+++ b/2019/09/13/hello-world/index.html
@@ -0,0 +1,436 @@
Hello World

Migrated the previous content over to Hexo.
diff --git a/2019/09/14/tcpdump-resp/index.html b/2019/09/14/tcpdump-resp/index.html
new file mode 100644
index 0000000000..b88f558869
--- /dev/null
+++ b/2019/09/14/tcpdump-resp/index.html
@@ -0,0 +1,456 @@
tcpdump_resp

Parsing the Redis RESP protocol with tcpdump

Recently, while capturing packets with tcpdump, I was surprised to find that it can decode the RESP protocol.
tcpdump -i lo port 6379
When I type set a a in redis-cli, tcpdump prints:
seq 1023993873:1023993900, ack 4077734227, win 342, options [nop,nop,TS val 2912390058 ecr 2912384753], length 27: RESP "set" "a" "a"
How is this implemented?

After pulling the tcpdump source from GitHub and building it, I found that resp_print is what decodes RESP, and the port is hard-coded: when either port is 6379, tcpdump tries to decode the payload as the Redis protocol.
void
tcp_print(netdissect_options *ndo,
const u_char *bp, u_int length,
const u_char *bp2, int fragmented)
{
...
if (ndo->ndo_packettype) {
switch (ndo->ndo_packettype) {
case PT_ZMTP1:
zmtp1_print(ndo, bp, length);
break;
case PT_RESP: // decode type selected explicitly with -T RESP
resp_print(ndo, bp, length);
break;
}
return;
}
...
else if (IS_SRC_OR_DST_PORT(REDIS_PORT)) //REDIS_PORT=6379
resp_print(ndo, bp, length);
...
}
(gdb) bt
#0 resp_print (ndo=0x7fffffffca40, bp=0x7ffff6e82088 "*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n", length=27) at ./print-resp.c:214
#1 0x000000000045ae3f in tcp_print (ndo=0x7fffffffca40, bp=0x7ffff6e82088 "*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n", length=27, bp2=0x7ffff6e82054 "E", fragmented=0) at ./print-tcp.c:723
#2 0x0000000000420f52 in ip_print_demux (ndo=0x7fffffffca40, bp=0x7ffff6e82068 "\242D\030\353=\b\350\021\363\rIS\200\030\001V\376C", length=59, ver=4, fragmented=0, ttl_hl=64, nh=6 '\006',
iph=0x7ffff6e82054 "E") at ./print-ip-demux.c:100
#3 0x0000000000420b1b in ip_print (ndo=0x7fffffffca40, bp=0x7ffff6e82054 "E", length=79) at ./print-ip.c:493
#4 0x000000000041bf64 in ethertype_print (ndo=0x7fffffffca40, ether_type=2048, p=0x7ffff6e82054 "E", length=79, caplen=79, src=0x7fffffffc680, dst=0x7fffffffc690) at ./print-ether.c:490
#5 0x000000000041bb03 in ether_print_common (ndo=0x7fffffffca40, p=0x7ffff6e82054 "E", length=79, caplen=79, print_switch_tag=0x0, switch_tag_len=0, print_encap_header=0x0, encap_header_arg=0x0)
at ./print-ether.c:345
#6 0x000000000041bc44 in ether_print (ndo=0x7fffffffca40, p=0x7ffff6e82046 "", length=93, caplen=93, print_encap_header=0x0, encap_header_arg=0x0) at ./print-ether.c:401
#7 0x000000000041bc94 in ether_if_print (ndo=0x7fffffffca40, h=0x7fffffffc7e0, p=0x7ffff6e82046 "") at ./print-ether.c:416
#8 0x00000000004078fe in pretty_print_packet (ndo=0x7fffffffca40, h=0x7fffffffc7e0, sp=0x7ffff6e82046 "", packets_captured=1) at ./print.c:414
#9 0x0000000000406d84 in print_packet (user=0x7fffffffca40 "", h=0x7fffffffc7e0, sp=0x7ffff6e82046 "") at ./tcpdump.c:2984
#10 0x00007ffff7755ef6 in ?? () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
#11 0x00007ffff775a4a3 in ?? () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
#12 0x00007ffff775f1fd in pcap_loop () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
#13 0x00000000004060b1 in main (argc=5, argv=0x7fffffffddd8) at ./tcpdump.c:2438

Decoding RESP on other ports

If your redis-server is not listening on 6379, just add -T RESP and tcpdump will decode RESP on any port:
sudo tcpdump -i lo port 7777 -T RESP
tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
listening on lo, link-type EN10MB (Ethernet), capture size 262144 bytes
13:07:31.737440 IP localhost.42466 > localhost.7777: Flags [P.], seq 2285148837:2285148864, ack 2982472773, win 342, options [nop,nop,TS val 2913788220 ecr 2913780890], length 27: RESP "set" "a" "a"
13:07:31.737680 IP localhost.7777 > localhost.42466: Flags [P.], seq 1:6, ack 27, win 342, options [nop,nop,TS val 2913788220 ecr 2913788220], length 5: RESP "OK"
13:07:31.737706 IP localhost.42466 > localhost.7777: Flags [.], ack 6, win 342, options [nop,nop,TS val 2913788220 ecr 2913788220], length 0
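As a side note, the RESP payload that resp_print decodes above ("*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n") is easy to produce yourself when you want traffic to capture. Here is a minimal Go sketch, with the host and port (127.0.0.1:7777) being assumptions; any Redis instance will do.

package main

import (
	"fmt"
	"net"
)

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:7777")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Hand-encode the RESP array for: set a a
	cmd := []string{"set", "a", "a"}
	msg := fmt.Sprintf("*%d\r\n", len(cmd))
	for _, arg := range cmd {
		msg += fmt.Sprintf("$%d\r\n%s\r\n", len(arg), arg)
	}
	if _, err := conn.Write([]byte(msg)); err != nil {
		panic(err)
	}

	reply := make([]byte, 64)
	n, _ := conn.Read(reply)
	fmt.Printf("server replied: %q\n", reply[:n]) // typically "+OK\r\n"
}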
diff --git a/2019/09/15/compile-and-debug-linux-kernel/index.html b/2019/09/15/compile-and-debug-linux-kernel/index.html
new file mode 100644
index 0000000000..913fa26f13
--- /dev/null
+++ b/2019/09/15/compile-and-debug-linux-kernel/index.html
@@ -0,0 +1,468 @@
Debugging the kernel with qemu (qemu调试内核)

Compiling and debugging the Linux kernel

Related references

Step 1
$ cd linux-4.16
$ make menuconfig
$ make -j8
Enable debug info:
Kernel hacking  ---> 
[*] Kernel debugging
Compile-time checks and compiler options --->
[*] Compile the kernel with debug info
[*] Provide GDB scripts for kernel debugging
Disable address randomization: select Processor type and features, then turn it off there.

[*] 64-bit kernel
    General setup --->
[*] Enable loadable module support --->
-*- Enable the block layer --->
    Processor type and features --->

Deselect KASLR, i.e. turn off randomization of the kernel image address:
[ ] Randomize the address of the kernel image (KASLR)

Why do we need to disable address randomization? With KASLR enabled, the runtime addresses no longer match the symbols in vmlinux, so gdb breakpoints set from the symbol file would not hit.

Step 2

Build busybox (note: statically linked), then build an initramfs root filesystem.

Step 3

Run the system with qemu:
qemu-system-x86_64  -kernel  /home/dinosaur/Downloads/linux-4.16/arch/x86/boot/bzImage  -hda qemu_rootfs.img  -append "root=/dev/sda rootfstype=ext4 rw"   -gdb tcp::1234
Debug with gdb:
gdb vmlinux
(gdb) target remote localhost:1234
b vfs_write
After the breakpoint is hit, it outputs:
(gdb) bt
#0 vfs_write (file=0xffff880006431700, buf=0x66506a <error: Cannot access memory at address 0x66506a>,
count=46, pos=0xffffc900000b7f08) at fs/read_write.c:529
#1 0xffffffff811a08cd in SYSC_write (count=<optimized out>, buf=<optimized out>, fd=<optimized out>)
at fs/read_write.c:589
#2 SyS_write (fd=<optimized out>, buf=6705258, count=46) at fs/read_write.c:581
#3 0xffffffff81001c8b in do_syscall_64 (regs=0xffff880006431700) at arch/x86/entry/common.c:287
#4 0xffffffff81a00071 in entry_SYSCALL_64 () at arch/x86/entry/entry_64.S:237
#5 0x0000000000000000 in ?? ()

diff --git a/2019/09/17/go-micro-hello-world/index.html b/2019/09/17/go-micro-hello-world/index.html
new file mode 100644
index 0000000000..c28881046a
--- /dev/null
+++ b/2019/09/17/go-micro-hello-world/index.html
@@ -0,0 +1,451 @@
go-micro hello world

I've recently been learning a little Go and ran into quite a few pitfalls.

What is mDNS

Related reading

A go-micro build failure I ran into (as of 2019/9/19)

The Go version I was using at the time was 1.10, and the build failed because the crypto/ed25519 package could not be found.

How to fix it?

Upgrade to Go 1.13 or later; the package only seems to exist in the standard library from 1.13 onwards.
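A quick way to confirm the package is available is to build a tiny program against the standard library crypto/ed25519. This is just a sanity-check sketch (not go-micro code); it only compiles on Go 1.13+.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

func main() {
	// GenerateKey, Sign and Verify come from the standard library crypto/ed25519 (Go 1.13+).
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sig := ed25519.Sign(priv, []byte("hello"))
	fmt.Println(ed25519.Verify(pub, []byte("hello"), sig)) // true
}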

Related notes:
// In Go 1.13, the ed25519 package was promoted to the standard library as
// crypto/ed25519, and this package became a wrapper for the standard library one.
//
// +build !go1.13
The full call flow and a packet capture

// todo
diff --git "a/2019/09/18/golang-interface-\346\257\224\350\276\203/index.html" "b/2019/09/18/golang-interface-\346\257\224\350\276\203/index.html"
new file mode 100644
index 0000000000..470fc10c5a
--- /dev/null
+++ "b/2019/09/18/golang-interface-\346\257\224\350\276\203/index.html"
@@ -0,0 +1,457 @@
Comparing Go interface values (golang interface 比较)

Source (the Go language specification, comparison operators):
Interface values are comparable. Two interface values are equal if they have identical dynamic types and equal dynamic values or if both have value nil.
A value x of non-interface type X and a value t of interface type T are comparable when values of type X are comparable and X implements T. They are equal if t's dynamic type is identical to X and t's dynamic value is equal to x.

A comparison of two interface values with identical dynamic types causes a run-time panic if values of that type are not comparable. This behavior applies not only to direct interface value comparisons but also when comparing arrays of interface values or structs with interface-valued fields.

Interface values are comparable. Two interface values are equal when either of the following holds:

  • their dynamic types and dynamic values are both equal, or
  • both interface values are nil.

When one operand is an interface value and the other is not, they are comparable only under this condition:

  • x is a value of non-interface type X and t is a value of interface type T; they are comparable when values of type X are comparable and X implements T.

And in that case, when are they equal?

  • They are equal when t's dynamic type is identical to X and t's dynamic value is equal to x.

When two interface values with identical dynamic types are compared and that type is not comparable, a run-time panic occurs. This applies not only to direct interface comparisons but also to comparing arrays of interface values, or structs that have interface-valued fields.
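Here is a minimal, self-contained Go sketch of these rules; the type T and the values below are made up for illustration.

package main

import "fmt"

// T's only field is a slice, so values of T are not comparable with ==.
type T struct{ s []int }

func main() {
	// Identical dynamic types and equal dynamic values: equal.
	var a, b interface{} = 1, 1
	fmt.Println(a == b) // true

	// Two nil interface values: equal.
	var x, y interface{}
	fmt.Println(x == y) // true

	// Interface value compared with a non-interface value:
	// equal when the dynamic type is int and the dynamic values match.
	var t interface{} = 3
	fmt.Println(t == 3) // true

	// Identical dynamic types that are themselves not comparable: run-time panic.
	var p, q interface{} = T{}, T{}
	fmt.Println(p == q) // panic: runtime error: comparing uncomparable type main.T
}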

Related analysis
diff --git "a/2019/09/19/cors-\347\233\270\345\205\263/index.html" "b/2019/09/19/cors-\347\233\270\345\205\263/index.html"
new file mode 100644
index 0000000000..07aeac389c
--- /dev/null
+++ "b/2019/09/19/cors-\347\233\270\345\205\263/index.html"
@@ -0,0 +1,454 @@
CORS notes (cors 相关)

Preface

Before we can talk about CORS, we have to talk about the same-origin policy and cross-origin requests.

Why cross-origin requests are restricted

The same-origin policy restricts how a document or script loaded from one origin can interact with resources from another origin. It is an important security mechanism for isolating potentially malicious documents, and it is enforced by the browser itself.

RFC 6454

User agents interact with content created by a large number of
authors. Although many of those authors are well-meaning, some
authors might be malicious. To the extent that user agents undertake
actions based on content they process, user agent implementors might
wish to restrict the ability of malicious authors to disrupt the
confidentiality or integrity of other content or servers.

Detailed related reading

CORS

Related reading

Related reading 2

Simple requests

A simple request is one that does not trigger a CORS preflight request.

Some requests do not trigger a CORS preflight. Such requests are called "simple requests" here; note that the term is not part of the Fetch specification (which is where CORS is defined). A request counts as a "simple request" if it satisfies all of the conditions below (a small server-side sketch follows the list):

One of the following methods is used:
GET
HEAD
POST

Apart from headers set automatically by the user agent, only headers from the CORS-safelisted set defined by the Fetch specification are set manually:
Accept
Accept-Language
Content-Language
Content-Type (with the additional restriction below)
DPR
Downlink
Save-Data
Viewport-Width
Width

The value of Content-Type is limited to one of:
text/plain
multipart/form-data
application/x-www-form-urlencoded

No event listeners are registered on any XMLHttpRequestUpload object used in the request; the XMLHttpRequestUpload object is reachable via XMLHttpRequest.upload.

No ReadableStream object is used in the request.
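For the server side of this, here is a minimal Go sketch (the allowed origin, headers and port are assumptions chosen for illustration): a handler wrapper that sets the CORS response headers and answers the preflight OPTIONS request that non-simple requests trigger. Simple requests skip the preflight and go straight to the real handler.

package main

import "net/http"

// corsMiddleware allows one assumed origin and answers preflight requests.
func corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "https://example.com")
		if r.Method == http.MethodOptions {
			// Preflight: declare what the real request may use.
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
			w.WriteHeader(http.StatusNoContent)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	http.ListenAndServe(":8080", corsMiddleware(mux))
}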

diff --git a/2019/09/21/hello-world-java/index.html b/2019/09/21/hello-world-java/index.html
new file mode 100644
index 0000000000..538ea5d0d9
--- /dev/null
+++ b/2019/09/21/hello-world-java/index.html
@@ -0,0 +1,481 @@
hello world java

Hello

A Java hello world:
public class HelloWorld {

public static void main(String[] args) {
// Prints "Hello, World" to the terminal window.
System.out.println("Hello, World");
}

}


Compiling

Compile with the -g option so that debug information is included:
javac -g HelloWorld.java 
Debugging

Option 1:

Use jdb to debug hello world:
jdb -classpath . HelloWorld
> stop  in HelloWorld.main                                
Deferring breakpoint HelloWorld.main.
It will be set after the class is loaded.
> run
run HelloWorld
Set uncaught java.lang.Throwable
Set deferred uncaught java.lang.Throwable
>
VM Started: Set deferred breakpoint HelloWorld.main

Breakpoint hit: "thread=main", HelloWorld.main(), line=5 bci=0
5 System.out.println("Hello, World");

main[1]

Writing hello world with Maven

If mvn package succeeds but java -jar some.jar complains that it cannot find the main class, this answer helps:
https://stackoverflow.com/a/9689877/6229548

Loading a class

The stack when the JVM loads java/lang/Class.class:
(gdb) bt
#0 open64 () at ../sysdeps/unix/syscall-template.S:84
#1 0x00007ffff695b544 in os::open (path=0x7ffff7fcefd0 "/home/dinosaur/jdk8/build/linux-x86_64-normal-server-slowdebug/jdk/classes/java/lang/Class.class", oflag=0, mode=0)
at /home/dinosaur/jdk8/hotspot/src/os/linux/vm/os_linux.cpp:5188
#2 0x00007ffff63ffdfc in ClassPathDirEntry::open_stream (this=0x7ffff006f178, name=0x7ffff000cce8 "java/lang/Class.class", __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:210
#3 0x00007ffff640055b in LazyClassPathEntry::open_stream (this=0x7ffff001ad48, name=0x7ffff000cce8 "java/lang/Class.class", __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:330
#4 0x00007ffff640209b in ClassLoader::load_classfile (h_name=0x7ffff4062108, __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:909
#5 0x00007ffff6a8570a in SystemDictionary::load_instance_class (class_name=0x7ffff4062108, class_loader=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1304
#6 0x00007ffff6a838b8 in SystemDictionary::resolve_instance_class_or_null (name=0x7ffff4062108, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:779
#7 0x00007ffff6a81ff7 in SystemDictionary::resolve_or_null (class_name=0x7ffff4062108, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:232
#8 0x00007ffff6a819f2 in SystemDictionary::resolve_or_fail (class_name=0x7ffff4062108, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:171
#9 0x00007ffff6a81d64 in SystemDictionary::resolve_or_fail (class_name=0x7ffff4062108, throw_error=true, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:212
#10 0x00007ffff6a87277 in SystemDictionary::initialize_wk_klass (id=SystemDictionary::Class_klass_knum, init_opt=0, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1866
#11 0x00007ffff6a873a7 in SystemDictionary::initialize_wk_klasses_until (limit_id=SystemDictionary::Cloneable_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Object_klass_knum,
__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1882
#12 0x00007ffff6a8b13c in SystemDictionary::initialize_wk_klasses_through (end_id=SystemDictionary::Class_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Object_klass_knum,
__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp:408
#13 0x00007ffff6a874e0 in SystemDictionary::initialize_preloaded_classes (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1901
#14 0x00007ffff6a87199 in SystemDictionary::initialize (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1843
#15 0x00007ffff6ad68c9 in Universe::genesis (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:288
#16 0x00007ffff6ad8db6 in universe2_init () at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:991
#17 0x00007ffff66463b3 in init_globals () at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/init.cpp:114
#18 0x00007ffff6ab93ef in Threads::create_vm (args=0x7ffff7fd0e80, canTryAgain=0x7ffff7fd0e03) at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/thread.cpp:3424
#19 0x00007ffff6702ed0 in JNI_CreateJavaVM (vm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, args=0x7ffff7fd0e80) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jni.cpp:5166
#20 0x00007ffff7bc3bda in InitializeJVM (pvm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, ifn=0x7ffff7fd0f30) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:1145
#21 0x00007ffff7bc1a36 in JavaMain (_args=0x7fffffffa910) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:371
#22 0x00007ffff73d66ba in start_thread (arg=0x7ffff7fd1700) at pthread_create.c:333
#23 0x00007ffff78f741d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109

Loading the ClassLoader

The stack when java/lang/ClassLoader.class is loaded:
(gdb) bt
#0 open64 () at ../sysdeps/unix/syscall-template.S:84
#1 0x00007ffff695b544 in os::open (path=0x7ffff7fcefd0 "/home/dinosaur/jdk8/build/linux-x86_64-normal-server-slowdebug/jdk/classes/java/lang/ClassLoader.class", oflag=0, mode=0)
at /home/dinosaur/jdk8/hotspot/src/os/linux/vm/os_linux.cpp:5188
#2 0x00007ffff63ffdfc in ClassPathDirEntry::open_stream (this=0x7ffff006f178, name=0x7ffff000cd08 "java/lang/ClassLoader.class", __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:210
#3 0x00007ffff640055b in LazyClassPathEntry::open_stream (this=0x7ffff001ad48, name=0x7ffff000cd08 "java/lang/ClassLoader.class", __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:330
#4 0x00007ffff640209b in ClassLoader::load_classfile (h_name=0x7ffff40621c8, __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:909
#5 0x00007ffff6a8570a in SystemDictionary::load_instance_class (class_name=0x7ffff40621c8, class_loader=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1304
#6 0x00007ffff6a838b8 in SystemDictionary::resolve_instance_class_or_null (name=0x7ffff40621c8, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:779
#7 0x00007ffff6a81ff7 in SystemDictionary::resolve_or_null (class_name=0x7ffff40621c8, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:232
#8 0x00007ffff6a819f2 in SystemDictionary::resolve_or_fail (class_name=0x7ffff40621c8, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:171
#9 0x00007ffff6a81d64 in SystemDictionary::resolve_or_fail (class_name=0x7ffff40621c8, throw_error=true, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:212
#10 0x00007ffff6a87277 in SystemDictionary::initialize_wk_klass (id=SystemDictionary::ClassLoader_klass_knum, init_opt=0, __the_thread__=0x7ffff000c000)
at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1866
#11 0x00007ffff6a873a7 in SystemDictionary::initialize_wk_klasses_until (limit_id=SystemDictionary::SoftReference_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Cloneable_klass_knum,
__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1882
#12 0x00007ffff6a8b13c in SystemDictionary::initialize_wk_klasses_through (end_id=SystemDictionary::Reference_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Cloneable_klass_knum,
__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp:408
#13 0x00007ffff6a87553 in SystemDictionary::initialize_preloaded_classes (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1918
#14 0x00007ffff6a87199 in SystemDictionary::initialize (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1843
#15 0x00007ffff6ad68c9 in Universe::genesis (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:288
#16 0x00007ffff6ad8db6 in universe2_init () at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:991
#17 0x00007ffff66463b3 in init_globals () at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/init.cpp:114
#18 0x00007ffff6ab93ef in Threads::create_vm (args=0x7ffff7fd0e80, canTryAgain=0x7ffff7fd0e03) at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/thread.cpp:3424
#19 0x00007ffff6702ed0 in JNI_CreateJavaVM (vm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, args=0x7ffff7fd0e80) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jni.cpp:5166
#20 0x00007ffff7bc3bda in InitializeJVM (pvm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, ifn=0x7ffff7fd0f30) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:1145
#21 0x00007ffff7bc1a36 in JavaMain (_args=0x7fffffffa910) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:371
#22 0x00007ffff73d66ba in start_thread (arg=0x7ffff7fd1700) at pthread_create.c:333
#23 0x00007ffff78f741d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109

Printing hello world

This is the stack when "Hello, World" is printed; the frames were presumably optimized, so a full stack cannot be shown:
(gdb) bt
#0 write () at ../sysdeps/unix/syscall-template.S:84
#1 0x00007ffff556779a in handleWrite (fd=1, buf=0x7ffff7fce270, len=12)
at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/io_util_md.c:164
#2 0x00007ffff556710a in writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0398, bytes=0x7ffff7fd0390, off=0, len=12, append=0 '\000',
fid=0x47e1043) at /home/dinosaur/jdk8/jdk/src/share/native/java/io/io_util.c:189
#3 0x00007ffff555a79c in Java_java_io_FileOutputStream_writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0398, bytes=0x7ffff7fd0390,
off=0, len=12, append=0 '\000') at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/FileOutputStream_md.c:70
#4 0x00007fffe10298dc in ?? ()
#5 0x0000000000000008 in ?? ()
#6 0x0000000000000008 in ?? ()
#7 0x00007ffff000c000 in ?? ()
#8 0x00007fffe02c74d8 in ?? ()
#9 0x00007fffe1028ee3 in ?? ()
#10 0x00007ffff7fd0318 in ?? ()
#11 0x00007fffe0173f60 in ?? ()
#12 0x00007ffff7fd0398 in ?? ()
#13 0x00007fffe0175120 in ?? ()
#14 0x0000000000000000 in ?? ()
(gdb) c
Continuing.
Hello, World

(gdb) bt
#0 write () at ../sysdeps/unix/syscall-template.S:84
#1 0x00007ffff556779a in handleWrite (fd=1, buf=0x7ffff7fce2d0, len=1)
at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/io_util_md.c:164
#2 0x00007ffff556710a in writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0400, bytes=0x7ffff7fd03f8, off=0, len=1, append=0 '\000',
fid=0x47e1043) at /home/dinosaur/jdk8/jdk/src/share/native/java/io/io_util.c:189
#3 0x00007ffff555a79c in Java_java_io_FileOutputStream_writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0400, bytes=0x7ffff7fd03f8,
off=0, len=1, append=0 '\000') at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/FileOutputStream_md.c:70
#4 0x00007fffe10298dc in ?? ()
#5 0x00007ffff7fd0410 in ?? ()
#6 0x00007ffff672dd43 in JVM_ArrayCopy (env=0x7ffff000c210, ignored=0x7ffff7fd0400, src=0x7ffff7fd03f8, src_pos=0,
dst=0x7f00f6265bea, dst_pos=1, length=0) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jvm.cpp:298
#7 0x00007fffe1007500 in ?? ()
#8 0x0000000000000000 in ?? ()

java class file

4.1 The ClassFile Structure

A class file consists of a stream of 8-bit bytes. All 16-bit, 32-bit, and 64-bit
quantities are constructed by reading in two, four, and eight consecutive 8-bit
bytes, respectively. Multibyte data items are always stored in big-endian order,
where the high bytes come first. In the Java SE platform, this format is supported
by interfaces java.io.DataInput and java.io.DataOutput and classes such as
java.io.DataInputStream and java.io.DataOutputStream.

From the JVM documentation we can see that a class file stores the magic number 0xCAFEBABE, and that multi-byte items are stored big-endian.

4.1 The ClassFile Structure
A class file consists of a single ClassFile structure:

ClassFile {
u4 magic;
u2 minor_version;
u2 major_version;
u2 constant_pool_count;
cp_info constant_pool[constant_pool_count-1];
u2 access_flags;
u2 this_class;
u2 super_class;
u2 interfaces_count;
u2 interfaces[interfaces_count];
u2 fields_count;
field_info fields[fields_count];
u2 methods_count;
method_info methods[methods_count];
u2 attributes_count;
attribute_info attributes[attributes_count];
}
The magic item supplies the magic number identifying the class file format; it has the value 0xCAFEBABE.
dinosaur@dinosaur-X550VXK:~/jdk8/build$ hexdump  HelloWorld.class -C
Use hexdump to look at the class file:

00000000 ca fe ba be 00 00 00 34 00 1d 0a 00 06 00 0f 09 |.......4........|
00000010 00 10 00 11 08 00 12 0a 00 13 00 14 07 00 15 07 |................|
00000020 00 16 01 00 06 3c 69 6e 69 74 3e 01 00 03 28 29 |.....<init>...()|
00000030 56 01 00 04 43 6f 64 65 01 00 0f 4c 69 6e 65 4e |V...Code...LineN|
00000040 75 6d 62 65 72 54 61 62 6c 65 01 00 04 6d 61 69 |umberTable...mai|
00000050 6e 01 00 16 28 5b 4c 6a 61 76 61 2f 6c 61 6e 67 |n...([Ljava/lang|
00000060 2f 53 74 72 69 6e 67 3b 29 56 01 00 0a 53 6f 75 |/String;)V...Sou|
00000070 72 63 65 46 69 6c 65 01 00 0f 48 65 6c 6c 6f 57 |rceFile...HelloW|
00000080 6f 72 6c 64 2e 6a 61 76 61 0c 00 07 00 08 07 00 |orld.java.......|
00000090 17 0c 00 18 00 19 01 00 0c 48 65 6c 6c 6f 2c 20 |.........Hello, |
000000a0 57 6f 72 6c 64 07 00 1a 0c 00 1b 00 1c 01 00 0a |World...........|
000000b0 48 65 6c 6c 6f 57 6f 72 6c 64 01 00 10 6a 61 76 |HelloWorld...jav|
000000c0 61 2f 6c 61 6e 67 2f 4f 62 6a 65 63 74 01 00 10 |a/lang/Object...|
000000d0 6a 61 76 61 2f 6c 61 6e 67 2f 53 79 73 74 65 6d |java/lang/System|
000000e0 01 00 03 6f 75 74 01 00 15 4c 6a 61 76 61 2f 69 |...out...Ljava/i|
000000f0 6f 2f 50 72 69 6e 74 53 74 72 65 61 6d 3b 01 00 |o/PrintStream;..|
00000100 13 6a 61 76 61 2f 69 6f 2f 50 72 69 6e 74 53 74 |.java/io/PrintSt|
00000110 72 65 61 6d 01 00 07 70 72 69 6e 74 6c 6e 01 00 |ream...println..|
00000120 15 28 4c 6a 61 76 61 2f 6c 61 6e 67 2f 53 74 72 |.(Ljava/lang/Str|
00000130 69 6e 67 3b 29 56 00 21 00 05 00 06 00 00 00 00 |ing;)V.!........|
00000140 00 02 00 01 00 07 00 08 00 01 00 09 00 00 00 1d |................|
00000150 00 01 00 01 00 00 00 05 2a b7 00 01 b1 00 00 00 |........*.......|
00000160 01 00 0a 00 00 00 06 00 01 00 00 00 01 00 09 00 |................|
00000170 0b 00 0c 00 01 00 09 00 00 00 25 00 02 00 01 00 |..........%.....|
00000180 00 00 09 b2 00 02 12 03 b6 00 04 b1 00 00 00 01 |................|
00000190 00 0a 00 00 00 0a 00 02 00 00 00 05 00 08 00 06 |................|
000001a0 00 01 00 0d 00 00 00 02 00 0e |..........|

Now let's walk through the contents of this hello world class file.

First comes the magic number: the four bytes ca fe ba be.
Next is minor_version: 00 00.
Then major_version: 00 34 (52 decimal, i.e. Java SE 8).
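Reading those fields programmatically matches the hexdump. Below is a small Go sketch (it assumes HelloWorld.class sits in the current directory) that reads the u4/u2/u2 header fields big-endian, exactly as the ClassFile structure above specifies.

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("HelloWorld.class")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// u4 magic, u2 minor_version, u2 major_version, all big-endian.
	var hdr struct {
		Magic        uint32
		MinorVersion uint16
		MajorVersion uint16
	}
	if err := binary.Read(f, binary.BigEndian, &hdr); err != nil {
		panic(err)
	}
	fmt.Printf("magic=0x%X minor=%d major=%d\n", hdr.Magic, hdr.MinorVersion, hdr.MajorVersion)
	// For the file dumped above this prints: magic=0xCAFEBABE minor=0 major=52
}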

Related reading
diff --git "a/2019/09/21/\347\216\257\345\242\203\345\217\230\351\207\217\346\230\257\344\273\200\344\271\210/index.html" "b/2019/09/21/\347\216\257\345\242\203\345\217\230\351\207\217\346\230\257\344\273\200\344\271\210/index.html"
new file mode 100644
index 0000000000..e83b9e5a87
--- /dev/null
+++ "b/2019/09/21/\347\216\257\345\242\203\345\217\230\351\207\217\346\230\257\344\273\200\344\271\210/index.html"
@@ -0,0 +1,470 @@
What are environment variables? (环境变量是什么)

Environment variables

When we run the shell command env, we see a long list of strings; those are the environment variables of that process.

How environment variables are stored

The first two arguments are just the same. The third argument envp gives the program’s environment; it is the same as the value of environ. See Environment Variables. POSIX.1 does not allow this three-argument form, so to be portable it is best to write main to take two arguments, and use the value of environ.

The related POSIX documentation:

where argc is the argument count and argv is an array of character pointers to the arguments themselves. In addition, the following variable:

extern char **environ;

is initialized as a pointer to an array of character pointers to the environment strings. The argv and environ arrays are each terminated by a null pointer. The null pointer terminating the argv array is not counted in argc.

Example

Here is an example:
#include <stdio.h>

extern char **environ;

int main(int argc, const char *argv[]) {
printf("environment variables:\n");
int i = 0;
while (environ[i]) {
printf("%p\t%s\n", environ[i], environ[i]);
i++;
}

printf("argv:\n");
for (int i = 0; i < argc; i++) {
printf("%p\t%s\n", argv[i], argv[i]);
}
}
Compile it and it prints the environment variables and the arguments:
gcc main.c -o main
dinosaur@dinosaur-X550VXK:~/test$ ./main
environment variables:
0x7ffc250920c7 XDG_VTNR=7
0x7ffc250920d2 LC_PAPER=zh_CN.UTF-8
0x7ffc250920e7 LC_ADDRESS=zh_CN.UTF-8
0x7ffc250920fe XDG_SESSION_ID=c1
0x7ffc25092110 XDG_GREETER_DATA_DIR=/var/lib/lightdm-data/dinosaur
0x7ffc25092144 LC_MONETARY=zh_CN.UTF-8
0x7ffc2509215c CLUTTER_IM_MODULE=xim
...
The glibc variable

Definition

It is defined in glibc-master/posix/environ.c:
/* This file just defines the `__environ' variable (and alias `environ').  */

#include <unistd.h>
#include <stddef.h>

/* This must be initialized; we cannot have a weak alias into bss. */
char **__environ = NULL;
weak_alias (__environ, environ) // weak alias: environ is just another name for __environ
...
Reading: getenv
char *
getenv (const char *name)
{
size_t len = strlen (name);
char **ep;
uint16_t name_start;


...
name_start = *(const uint16_t *) name;
...
len -= 2;
name += 2;

for (ep = __environ; *ep != NULL; ++ep)
{
uint16_t ep_start = *(uint16_t *) *ep;

if (name_start == ep_start && !strncmp (*ep + 2, name, len)
&& (*ep)[len + 2] == '=')
return &(*ep)[len + 3];
}
...

return NULL;
}
Writing: putenv and setenv

Both putenv and setenv call __add_to_environ:
int
__add_to_environ (const char *name, const char *value, const char *combined,
int replace)
{
const size_t namelen = strlen (name);
size_t vallen;
...
vallen = strlen (value) + 1;
...
const size_t varlen = namelen + 1 + vallen;
...
memcpy (new_value, name, namelen);
new_value[namelen] = '=';
memcpy (&new_value[namelen + 1], value, vallen);
...
}
So in the end the char **environ variable simply holds key=value strings.

How and when is the environment inherited

// todo: dig into this when I have time

Summary

Environment variables are just a bunch of strings, and they are inherited through the parent-child process relationship.
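Although the glibc side of inheritance is still a // todo above, the behaviour itself is easy to observe. A minimal Go sketch (MY_DEMO_VAR is a made-up variable name) showing that a child process started without an explicit environment inherits the parent's key=value strings:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	os.Setenv("MY_DEMO_VAR", "hello")

	// With cmd.Env left nil, the child inherits the parent's environment.
	cmd := exec.Command("sh", "-c", `echo "child sees: $MY_DEMO_VAR"`)
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // child sees: hello

	// os.Environ() exposes the same key=value strings that environ holds in C.
	fmt.Println(len(os.Environ()), "entries in this process's environment")
}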

1. 环境变量的来源、原理与应用 (the origin, mechanism and use of environment variables)

2. The glibc documentation
diff --git "a/2019/09/28/maven\346\211\223\345\214\205NoClassDefFoundError-on-Maven-dependency/index.html" "b/2019/09/28/maven\346\211\223\345\214\205NoClassDefFoundError-on-Maven-dependency/index.html"
new file mode 100644
index 0000000000..3f58ccaa27
--- /dev/null
+++ "b/2019/09/28/maven\346\211\223\345\214\205NoClassDefFoundError-on-Maven-dependency/index.html"
@@ -0,0 +1,457 @@
NoClassDefFoundError when packaging with Maven (maven打包NoClassDefFoundError)

NoClassDefFoundError when packaging with Maven

I've just been learning how to use Maven. The project compiled fine, but running java -jar xxx.jar threw a NoClassDefFoundError.

Falling into the trap

The first step was to look for an answer on Stack Overflow, which suggested the maven-shade-plugin; that is in fact the right answer.

This is the correct configuration:
<project>
...
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
...
</project>
+ + +

Related links

So where did I go wrong?

At the time I didn't understand the plugins node nested inside the <pluginManagement> element.

This is the wrong way to write it:
<project>
...
<build>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</pluginManagement>
</build>
...
</project>
My final version

In the end the plugin has to be declared directly under the build element; it must not live only inside the <plugins> node under pluginManagement, because that section only sets defaults and does not bind the plugin to the build:
<project>
...
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>3.2.1</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
<pluginManagement>
...
</pluginManagement>
</build>
...
</project>
+ + +

Then run mvn package, and all the dependencies are packed into the jar.
diff --git a/2019/10/02/learn-es-invert-index/index.html b/2019/10/02/learn-es-invert-index/index.html
new file mode 100644
index 0000000000..ede2592292
--- /dev/null
+++ b/2019/10/02/learn-es-invert-index/index.html
@@ -0,0 +1,625 @@
The inverted index (倒排索引)

Compiling Elasticsearch
gradle idea
It took quite a while to run:

BUILD SUCCESSFUL in 49m 34s
334 actionable tasks: 334 executed

Elasticsearch call stacks

A stack captured while handling a _cat REST request:
prepareRequest:61, RestCatAction (org.elasticsearch.rest.action.cat)
handleRequest:80, BaseRestHandler (org.elasticsearch.rest)
handleRequest:69, SecurityRestFilter (org.elasticsearch.xpack.security.rest)
dispatchRequest:240, RestController (org.elasticsearch.rest)
tryAllHandlers:337, RestController (org.elasticsearch.rest)
dispatchRequest:174, RestController (org.elasticsearch.rest)
dispatchRequest:324, AbstractHttpServerTransport (org.elasticsearch.http)
handleIncomingRequest:374, AbstractHttpServerTransport (org.elasticsearch.http)
incomingRequest:303, AbstractHttpServerTransport (org.elasticsearch.http)
channelRead0:66, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
channelRead0:31, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
channelRead:105, SimpleChannelInboundHandler (io.netty.channel)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:58, Netty4HttpPipeliningHandler (org.elasticsearch.http.netty4)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
channelRead:111, MessageToMessageCodec (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:323, ByteToMessageDecoder (io.netty.handler.codec)
channelRead:297, ByteToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:286, IdleStateHandler (io.netty.handler.timeout)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:1434, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:965, DefaultChannelPipeline (io.netty.channel)
read:163, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
processSelectedKey:644, NioEventLoop (io.netty.channel.nio)
processSelectedKeysPlain:544, NioEventLoop (io.netty.channel.nio)
processSelectedKeys:498, NioEventLoop (io.netty.channel.nio)
run:458, NioEventLoop (io.netty.channel.nio)
run:897, SingleThreadEventExecutor$5 (io.netty.util.concurrent)
run:834, Thread (java.lang)
And this one:
handleRequest:97, BaseRestHandler (org.elasticsearch.rest)
handleRequest:69, SecurityRestFilter (org.elasticsearch.xpack.security.rest)
dispatchRequest:240, RestController (org.elasticsearch.rest)
tryAllHandlers:337, RestController (org.elasticsearch.rest)
dispatchRequest:174, RestController (org.elasticsearch.rest)
dispatchRequest:324, AbstractHttpServerTransport (org.elasticsearch.http)
handleIncomingRequest:374, AbstractHttpServerTransport (org.elasticsearch.http)
incomingRequest:303, AbstractHttpServerTransport (org.elasticsearch.http)
channelRead0:66, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
channelRead0:31, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
channelRead:105, SimpleChannelInboundHandler (io.netty.channel)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:58, Netty4HttpPipeliningHandler (org.elasticsearch.http.netty4)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
channelRead:111, MessageToMessageCodec (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:323, ByteToMessageDecoder (io.netty.handler.codec)
channelRead:297, ByteToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:286, IdleStateHandler (io.netty.handler.timeout)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
channelRead:1434, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:965, DefaultChannelPipeline (io.netty.channel)
read:163, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
processSelectedKey:644, NioEventLoop (io.netty.channel.nio)
processSelectedKeysPlain:544, NioEventLoop (io.netty.channel.nio)
processSelectedKeys:498, NioEventLoop (io.netty.channel.nio)
run:458, NioEventLoop (io.netty.channel.nio)
run:897, SingleThreadEventExecutor$5 (io.netty.util.concurrent)
run:834, Thread (java.lang)
A short introduction to inverted indexes

What problem does an inverted index solve?

Suppose we have a document a.txt that contains the text "hello world, i am dinosaur".

We want to determine whether this document contains the word "world". How do we do it?

When there are very few documents, we can:

  • 1. open the file
  • 2. read its contents from the beginning and check whether it contains "world"

But what if we don't just have a.txt; we also have b.txt and c.txt? How do we tell whether some word appears in these documents, and if it does, in which documents and where?

If we keep reading every file from the beginning, that is fine with a handful of documents, but with many documents it becomes very slow to scan them all.

One of the problems an inverted index solves is exactly this: quickly finding out whether a term occurs in a document collection, and if so, in which documents.

A related example

baseline inverted index

An inverted index has two main parts:

  • The first part (the dictionary entry for a term) has two fields:
    • df: the number of documents that contain the term; if n documents contain it, df equals n
    • a pointer to the postings list for that term
  • The second part is a list (the postings list); each element has two fields:
    • docId: the id of a document, which you can think of as the document's primary key
    • tf: how many times the term occurs in that docId

(figure: layout of the dictionary and postings list, uwiAvq.png)

I wrote a small demo of this myself (the code is on GitHub); its output looks like this:
keeper  3|[{1 1} {4 1} {5 1}]
In 1|[{2 1}]
house 2|[{2 1} {3 1}]
nignt 2|[{4 1} {5 1}]
did 1|[{4 1}]
dark 1|[{6 1}]
old 4|[{1 1} {2 1} {3 1} {4 1}]
night 3|[{1 1} {5 1} {6 1}]
had 1|[{3 1}]
sleeps 1|[{6 1}]
keep 3|[{1 1} {3 1} {5 1}]
big 2|[{2 1} {3 1}]
keeps 3|[{1 1} {5 1} {6 1}]
the 6|[{1 1} {2 1} {3 1} {4 1} {5 1} {6 1}]
never 1|[{4 1}]
and 1|[{6 1}]
And 1|[{6 1}]
in 5|[{1 1} {2 1} {3 1} {5 1} {6 1}]
The 3|[{1 1} {3 1} {5 1}]
sleep 1|[{4 1}]
Where 1|[{4 1}]
town 2|[{1 1} {3 1}]
gown 1|[{2 1}]
Steps to build an inverted index

  • 1. read the documents
  • 2. tokenize the text
  • 3. normalize the tokens
  • 4. build the inverted index with term frequencies and offsets

(A minimal Go sketch of these steps follows.)
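Here is the promised sketch. It is not the GitHub demo mentioned above, just an illustration: the two documents are hard-coded sample sentences rather than files read from disk, tokenization is strings.Fields, and normalization is simply lower-casing.

package main

import (
	"fmt"
	"strings"
)

// Posting records one document that contains a term and how often it occurs.
type Posting struct {
	DocID int
	TF    int
}

func main() {
	docs := []string{
		"The night keeper keeps the keep in the town",
		"In the big old house in the big old gown",
	}

	// index maps a normalized term to its postings list; len(postings) is df.
	index := map[string][]Posting{}
	for docID, doc := range docs {
		counts := map[string]int{}
		for _, tok := range strings.Fields(doc) { // tokenize
			term := strings.ToLower(tok) // normalize
			counts[term]++
		}
		for term, tf := range counts {
			index[term] = append(index[term], Posting{DocID: docID + 1, TF: tf})
		}
	}

	for term, postings := range index {
		fmt.Printf("%s %d|%v\n", term, len(postings), postings)
	}
}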

Tokenization

https://www.cnblogs.com/forfuture1978/archive/2010/06/06/1752837.html

A Lucene stack trace; the main logic lives in the invert method:
incrementToken:48, FilteringTokenFilter (org.apache.lucene.analysis)
invert:812, DefaultIndexingChain$PerField (org.apache.lucene.index)
processField:442, DefaultIndexingChain (org.apache.lucene.index)
processDocument:406, DefaultIndexingChain (org.apache.lucene.index)
updateDocument:250, DocumentsWriterPerThread (org.apache.lucene.index)
updateDocument:495, DocumentsWriter (org.apache.lucene.index)
updateDocument:1594, IndexWriter (org.apache.lucene.index)
addDocument:1213, IndexWriter (org.apache.lucene.index)
indexDoc:198, IndexFiles (com.dinosaur)
visitFile:155, IndexFiles$1 (com.dinosaur)
visitFile:151, IndexFiles$1 (com.dinosaur)
walkFileTree:2670, Files (java.nio.file)
walkFileTree:2742, Files (java.nio.file)
indexDocs:151, IndexFiles (com.dinosaur)
main:113, IndexFiles (com.dinosaur)
The core of Lucene's tokenization is incrementToken, which produces the next token.

An example

Lucene's standard tokenizer:
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);  // a final, shared attribute instance
@Override
public final boolean incrementToken() throws IOException {
...
scanner.getText(termAtt); // the scanner returns the next term and writes it into termAtt
...
}
diff --git a/2019/10/08/mysql-string-max-length/index.html b/2019/10/08/mysql-string-max-length/index.html
new file mode 100644
index 0000000000..e101cc986d
--- /dev/null
+++ b/2019/10/08/mysql-string-max-length/index.html
@@ -0,0 +1,524 @@
Maximum lengths of MySQL string types (mysql字符串最大长度)

This post records what limits apply to the various MySQL string types.

Preface

Something odd happened today: I converted a PDF document to HTML and stored it in MySQL, using a TEXT column. When I read it back, part of it was missing. After some searching I found that a TEXT column holds at most 65,535 bytes (with utf8mb4 that can be as few as roughly 16 K characters), and my document had been truncated at that limit.

Bytes versus characters

If you have written PHP, you probably know the difference between strlen("你好") and mb_strlen("你好"). In Java, the distinction between the byte-stream classes InputStream/OutputStream and the character-based Writer/Reader families will also be familiar.

The relationship between MySQL string types and their length limits

String Type Storage Requirements


In the following table, M represents the declared column length in characters for nonbinary string types and bytes for binary string types. L represents the actual length in bytes of a given string value.

Data Type | Storage Required
CHAR(M) | The compact family of InnoDB row formats optimize storage for variable-length character sets
BINARY(M) | M bytes, 0 <= M <= 255
VARCHAR(M), VARBINARY(M) | L + 1 bytes if column values require 0 − 255 bytes, L + 2 bytes if values may require more than 255 bytes
TINYBLOB, TINYTEXT | L + 1 bytes, where L < 2^8
BLOB, TEXT | L + 2 bytes, where L < 2^16
MEDIUMBLOB, MEDIUMTEXT | L + 3 bytes, where L < 2^24
LONGBLOB, LONGTEXT | L + 4 bytes, where L < 2^32
ENUM('value1','value2',…) | 1 or 2 bytes, depending on the number of enumeration values (65,535 values maximum)
SET('value1','value2',…) | 1, 2, 3, 4, or 8 bytes, depending on the number of set members (64 members maximum)

Source: the MySQL reference manual, "String Type Storage Requirements".

CHAR

CHAR holds at most 255 characters.

Creating a CHAR column of 256 characters with the following SQL raises this error:
ERROR 1074 (42000): Column length too big for column ‘name’ (max = 255); use BLOB or TEXT instead

CREATE TABLE `test123` ( `name` char(256)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;
(gdb) bt
#0 my_error (nr=1074, MyFlags=0) at /home/dinosaur/Downloads/mysql-5.7.21/mysys/my_error.c:194
#1 0x0000000000f93e75 in Create_field::init (this=0x7fb9b8006740, thd=0x7fb9b8000b70, fld_name=0x7fb9b8006730 "name", fld_type=MYSQL_TYPE_STRING, fld_length=0x7fb9b8006738 "256", fld_decimals=0x0, fld_type_modifier=0,
fld_default_value=0x0, fld_on_update_value=0x0, fld_comment=0x7fb9b8002fe0, fld_change=0x0, fld_interval_list=0x7fb9b8003150, fld_charset=0x0, fld_geom_type=0, fld_gcol_info=0x0)
at /home/dinosaur/Downloads/mysql-5.7.21/sql/field.cc:10962
#2 0x000000000163ae21 in add_field_to_list (thd=0x7fb9b8000b70, field_name=0x7fba3d30c460, type=MYSQL_TYPE_STRING, length=0x7fb9b8006738 "256", decimals=0x0, type_modifier=0, default_value=0x0, on_update_value=0x0,
comment=0x7fb9b8002fe0, change=0x0, interval_list=0x7fb9b8003150, cs=0x0, uint_geom_type=0, gcol_info=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5798
#3 0x000000000178e3f6 in MYSQLparse (YYTHD=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_yacc.yy:6337
#4 0x000000000163d75a in parse_sql (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550, creation_ctx=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:7131
#5 0x0000000001639f07 in mysql_parse (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5469
#6 0x000000000162f0a3 in dispatch_command (thd=0x7fb9b8000b70, com_data=0x7fba3d30de00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
#7 0x000000000162df32 in do_command (thd=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
#8 0x0000000001770f97 in handle_connection (arg=0x570d510) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
#9 0x0000000001de0b41 in pfs_spawn_thread (arg=0x5749fc0) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
#10 0x00007fba478aa6ba in start_thread (arg=0x7fba3d30e700) at pthread_create.c:333
#11 0x00007fba46d3341d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
Maximum VARCHAR length

Similar to CHAR: try to create a VARCHAR column of 65,533 characters:
CREATE TABLE `test123` ( `name` varchar(65533)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;

The result is the same kind of error:

ERROR 1074 (42000): Column length too big for column ‘name’ (max = 16383); use BLOB or TEXT instead

(gdb) bt
#0 my_error (nr=1074, MyFlags=0) at /home/dinosaur/Downloads/mysql-5.7.21/mysys/my_error.c:194
#1 0x00000000016c9998 in prepare_blob_field (thd=0x7fb9b8000b70, sql_field=0x7fb9b8006840) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:4715
#2 0x00000000016c6a33 in mysql_prepare_create_table (thd=0x7fb9b8000b70, error_schema_name=0x7fb9b8006728 "test", error_table_name=0x7fb9b8006168 "test123", create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600,
tmp_table=false, db_options=0x7fba3d30b080, file=0x7fb9b8006ac0, key_info_buffer=0x7fba3d30c170, key_count=0x7fba3d30c16c, select_field_count=0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:3721
#3 0x00000000016cac22 in create_table_impl (thd=0x7fb9b8000b70, db=0x7fb9b8006728 "test", table_name=0x7fb9b8006168 "test123", error_table_name=0x7fb9b8006168 "test123", path=0x7fba3d30c180 "./test/test123",
create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600, internal_tmp_table=false, select_field_count=0, no_ha_table=false, is_trans=0x7fba3d30c3da, key_info=0x7fba3d30c170, key_count=0x7fba3d30c16c)
at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5131
#4 0x00000000016cb884 in mysql_create_table_no_lock (thd=0x7fb9b8000b70, db=0x7fb9b8006728 "test", table_name=0x7fb9b8006168 "test123", create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600, select_field_count=0,
is_trans=0x7fba3d30c3da) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5417
#5 0x00000000016cb9a2 in mysql_create_table (thd=0x7fb9b8000b70, create_table=0x7fb9b80061a0, create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5463
#6 0x00000000016335be in mysql_execute_command (thd=0x7fb9b8000b70, first_level=true) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:3248
#7 0x000000000163a31c in mysql_parse (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5582
#8 0x000000000162f0a3 in dispatch_command (thd=0x7fb9b8000b70, com_data=0x7fba3d30de00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
#9 0x000000000162df32 in do_command (thd=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
#10 0x0000000001770f97 in handle_connection (arg=0x570d510) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
#11 0x0000000001de0b41 in pfs_spawn_thread (arg=0x5749fc0) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
#12 0x00007fba478aa6ba in start_thread (arg=0x7fba3d30e700) at pthread_create.c:333
#13 0x00007fba46d3341d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109

(gdb) p sql_field->length
$2 = 262132
static bool prepare_blob_field(THD *thd, Create_field *sql_field)
{
DBUG_ENTER("prepare_blob_field");

if (sql_field->length > MAX_FIELD_VARCHARLENGTH && // sql_field->length = 262132
!(sql_field->flags & BLOB_FLAG))
{
/* Convert long VARCHAR columns to TEXT or BLOB */
char warn_buff[MYSQL_ERRMSG_SIZE];

if (sql_field->def || thd->is_strict_mode()) // in strict mode this raises: ERROR 1074 (42000): Column length too big for
{ // column 'name' (max = 16383); use BLOB or TEXT instead
my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
static_cast<ulong>(MAX_FIELD_VARCHARLENGTH / // MAX_FIELD_VARCHARLENGTH = 65535
sql_field->charset->mbmaxlen)); // sql_field->charset->mbmaxlen = 4
DBUG_RETURN(1);
}
...
}
So in strict mode a VARCHAR holds at most 65,535 bytes; with utf8mb4 (mbmaxlen = 4 bytes per character, as seen in gdb above) that is 65535 / 4 ≈ 16383 characters. Let's change it to varchar(16383):
mysql> CREATE TABLE `test123` ( `name` varchar(16383)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;
Query OK, 0 rows affected (0.26 sec)

+

OK, no problem this time.
diff --git a/2019/10/13/https-tls-ssl/index.html b/2019/10/13/https-tls-ssl/index.html
new file mode 100644
index 0000000000..13a4a64b7c
--- /dev/null
+++ b/2019/10/13/https-tls-ssl/index.html
@@ -0,0 +1,467 @@
https_tls_ssl

I recently got a Huawei Cloud VPS and wanted to set up a simple site, so I went through registering a domain and getting an HTTPS certificate.

After a lot of fiddling, it turned out the site could not be reached at all.

The real reason was that the domain had no ICP filing (备案); my certificate configuration was actually fine.

Troubleshooting

Narrowing it down with curl

Running curl -v <url> shows the handshake in detail:
./curl https://gitlab.shakudada.xyz -v
* STATE: INIT => CONNECT handle 0x1a23898; line 1491 (connection #-5000)
* Added connection 0. The cache now contains 1 members
* STATE: CONNECT => WAITRESOLVE handle 0x1a23898; line 1532 (connection #0)
* Trying 139.9.222.124:443...
* TCP_NODELAY set
* STATE: WAITRESOLVE => WAITCONNECT handle 0x1a23898; line 1611 (connection #0)
* Connected to gitlab.shakudada.xyz (139.9.222.124) port 443 (#0)
* STATE: WAITCONNECT => SENDPROTOCONNECT handle 0x1a23898; line 1667 (connection #0)
* Marked for [keep alive]: HTTP default
* ALPN, offering http/1.1
* Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
* successfully set certificate verify locations:
* CAfile: /etc/ssl/certs/ca-certificates.crt
CApath: none
* TLSv1.2 (OUT), TLS header, Certificate Status (22):
* TLSv1.2 (OUT), TLS handshake, Client hello (1):
* STATE: SENDPROTOCONNECT => PROTOCONNECT handle 0x1a23898; line 1682 (connection #0)
* error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
* Marked for [closure]: Failed HTTPS connection
* multi_done
* Closing connection 0
* The cache now contains 0 members
* Expire cleared (transfer 0x1a23898)
curl: (35) error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
dinosaur@dinosaur-X550VXK:~/curl/mycurl/bin$

It fails right after the Client Hello, so let's look at the packets directly with tcpdump:
tcpdump -i wlp3s0  host 139.9.222.124 and  port 443 -A -X

Here 139.9.222.124 is the IP of my server without the ICP filing.

Below is the capture, with the initial TCP three-way handshake omitted:

22:22:23.280367 IP 192.168.1.106.33170 > 139.9.222.124.https: Flags [P.], seq 1:518, ack 1, win 229, options [nop,nop,TS val 434651673 ecr 3365805465], length 517
0x0000: 4500 0239 15ab 4000 4006 f77b c0a8 016a E..9..@.@..{...j
0x0010: 8b09 de7c 8192 01bb f6ab 981d 4d9a ceb8 ...|........M...
0x0020: 8018 00e5 22f2 0000 0101 080a 19e8 4219 ....".........B.
0x0030: c89e 1d99 1603 0102 0001 0001 fc03 0304 ................
0x0040: 6e2a ea14 6844 e2e1 db8c 1ee3 3582 e33f n*..hD......5..?
0x0050: 9128 2ad2 cd1c bac2 1e70 dd4f 6587 d700 .(*......p.Oe...
0x0060: 009e c030 c02c c028 c024 c014 c00a 00a5 ...0.,.(.$......
0x0070: 00a3 00a1 009f 006b 006a 0069 0068 0039 .......k.j.i.h.9
0x0080: 0038 0037 0036 0088 0087 0086 0085 c032 .8.7.6.........2
0x0090: c02e c02a c026 c00f c005 009d 003d 0035 ...*.&.......=.5
0x00a0: 0084 c02f c02b c027 c023 c013 c009 00a4 .../.+.'.#......
0x00b0: 00a2 00a0 009e 0067 0040 003f 003e 0033 .......g.@.?.>.3
0x00c0: 0032 0031 0030 009a 0099 0098 0097 0045 .2.1.0.........E
0x00d0: 0044 0043 0042 c031 c02d c029 c025 c00e .D.C.B.1.-.).%..
0x00e0: c004 009c 003c 002f 0096 0041 c012 c008 .....<./...A....
0x00f0: 0016 0013 0010 000d c00d c003 000a 00ff ................
0x0100: 0100 0135 0000 0019 0017 0000 1467 6974 ...5.........git
0x0110: 6c61 622e 7368 616b 7564 6164 612e 7879 lab.shakudada.xy
0x0120: 7a00 0b00 0403 0001 0200 0a00 1c00 1a00 z...............
0x0130: 1700 1900 1c00 1b00 1800 1a00 1600 0e00 ................
0x0140: 0d00 0b00 0c00 0900 0a00 0d00 2000 1e06 ................
0x0150: 0106 0206 0305 0105 0205 0304 0104 0204 ................
0x0160: 0303 0103 0203 0302 0102 0202 0300 0f00 ................
0x0170: 0101 3374 0000 0010 000b 0009 0868 7474 ..3t.........htt
0x0180: 702f 312e 3100 1500 b000 0000 0000 0000 p/1.1...........
0x0190: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01a0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01b0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01c0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01d0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01e0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x01f0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x0200: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x0210: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x0220: 0000 0000 0000 0000 0000 0000 0000 0000 ................
0x0230: 0000 0000 0000 0000 00 .........
22:22:23.292507 IP 139.9.222.124.https > 192.168.1.106.33170: Flags [FP.], seq 1:650, ack 518, win 8192, length 649
0x0000: 4500 02b1 15ac 4000 f606 4102 8b09 de7c E.....@...A....|
0x0010: c0a8 016a 01bb 8192 4d9a ceb8 f6ab 9a22 ...j....M......"
0x0020: 5019 2000 d25b 0000 4854 5450 2f31 2e31 P....[..HTTP/1.1
0x0030: 2034 3033 2046 6f72 6269 6464 656e 0a43 .403.Forbidden.C
0x0040: 6f6e 7465 6e74 2d54 7970 653a 2074 6578 ontent-Type:.tex
0x0050: 742f 6874 6d6c 3b20 6368 6172 7365 743d t/html;.charset=
0x0060: 7574 662d 380a 5365 7276 6572 3a20 4144 utf-8.Server:.AD
0x0070: 4d2f 322e 312e 310a 436f 6e6e 6563 7469 M/2.1.1.Connecti
0x0080: 6f6e 3a20 636c 6f73 650a 436f 6e74 656e on:.close.Conten
0x0090: 742d 4c65 6e67 7468 3a20 3533 300a 0a3c t-Length:.530..<
0x00a0: 6874 6d6c 3e0a 3c68 6561 643e 0a3c 6d65 html>.<head>.<me
0x00b0: 7461 2068 7474 702d 6571 7569 763d 2243 ta.http-equiv="C
0x00c0: 6f6e 7465 6e74 2d54 7970 6522 2063 6f6e ontent-Type".con
0x00d0: 7465 6e74 3d22 7465 7874 6d6c 3b63 6861 tent="textml;cha
0x00e0: 7273 6574 3d47 4232 3331 3222 202f 3e0a rset=GB2312"./>.
0x00f0: 2020 203c 7374 796c 653e 626f 6479 7b62 ...<style>body{b
0x0100: 6163 6b67 726f 756e 642d 636f 6c6f 723a ackground-color:
0x0110: 2346 4646 4646 467d 3c2f 7374 796c 653e #FFFFFF}</style>
0x0120: 200a 3c74 6974 6c65 3ee9 9d9e e6b3 95e9 ..<title>.......
0x0130: 98bb e696 ad32 3334 3c2f 7469 746c 653e .....234</title>
0x0140: 0a20 203c 7363 7269 7074 206c 616e 6775 ...<script.langu
0x0150: 6167 653d 226a 6176 6173 6372 6970 7422 age="javascript"
0x0160: 2074 7970 653d 2274 6578 742f 6a61 7661 .type="text/java
0x0170: 7363 7269 7074 223e 0a20 2020 2020 2020 script">........
0x0180: 2020 7769 6e64 6f77 2e6f 6e6c 6f61 6420 ..window.onload.
0x0190: 3d20 6675 6e63 7469 6f6e 2028 2920 7b20 =.function.().{.
0x01a0: 0a20 2020 2020 2020 2020 2020 646f 6375 ............docu
0x01b0: 6d65 6e74 2e67 6574 456c 656d 656e 7442 ment.getElementB
0x01c0: 7949 6428 226d 6169 6e46 7261 6d65 2229 yId("mainFrame")
0x01d0: 2e73 7263 3d20 2268 7474 703a 2f2f 3131 .src=."http://11
0x01e0: 342e 3131 352e 3139 322e 3234 363a 3930 4.115.192.246:90
0x01f0: 3830 2f65 7272 6f72 2e68 746d 6c22 3b0a 80/error.html";.
0x0200: 2020 2020 2020 2020 2020 2020 7d0a 3c2f ............}.</
0x0210: 7363 7269 7074 3e20 2020 0a3c 2f68 6561 script>....</hea
0x0220: 643e 0a20 203c 626f 6479 3e0a 2020 2020 d>...<body>.....
0x0230: 3c69 6672 616d 6520 7374 796c 653d 2277 <iframe.style="w
0x0240: 6964 7468 3a31 3030 253b 2068 6569 6768 idth:100%;.heigh
0x0250: 743a 3130 3025 3b22 2069 643d 226d 6169 t:100%;".id="mai
0x0260: 6e46 7261 6d65 2220 7372 633d 2222 2066 nFrame".src="".f
0x0270: 7261 6d65 626f 7264 6572 3d22 3022 2073 rameborder="0".s
0x0280: 6372 6f6c 6c69 6e67 3d22 6e6f 223e 3c2f crolling="no"></
0x0290: 6966 7261 6d65 3e0a 2020 2020 3c2f 626f iframe>.....</bo
0x02a0: 6479 3e0a 2020 2020 2020 3c2f 6874 6d6c dy>.......</html
0x02b0: 3e >
22:22:23.292552 IP 192.168.1.106.33170 > 139.9.222.124.https: Flags [.], ack 651, win 239, options [nop,nop,TS val 434651685 ecr 3365805465], length 0
0x0000: 4500 0034 15ac 4000 4006 f97f c0a8 016a E..4..@.@......j
0x0010: 8b09 de7c 8192 01bb f6ab 9a22 4d9a d142 ...|......."M..B
0x0020: 8010 00ef d4f7 0000 0101 080a 19e8 4225 ..............B%
0x0030: c89e 1d99 ....
22:22:23.292562 IP 139.9.222.124.https > 192.168.1.106.33170: Flags [.], ack 518, win 235, options [nop,nop,TS val 3365805485 ecr 434651673], length 0
0x0000: 4500 0034 1ff1 4000 3106 fe3a 8b09 de7c E..4..@.1..:...|
0x0010: c0a8 016a 01bb 8192 4d9a ceb8 f6ab 9a22 ...j....M......"
0x0020: 8010 00eb d77d 0000 0101 080a c89e 1dad .....}..........
0x0030: 19e8 4219

The leading bytes 45 00 are obviously the start of the IP header: the 4 means IPv4, and the 5 is the header length in 32-bit words, so the IP header is 5*4 = 20 bytes long;

IP header

That is:
0x0000:  4500 0239 15ab 4000 4006 f77b c0a8 016a  E..9..@.@..{...j
0x0010: 8b09 de7c
Everything up to de7c is the IP header.

TCP header
0x0000:  4500 0239 15ab 4000 4006 f77b c0a8 016a  E..9..@.@..{...j
0x0010: 8b09 de7c 8192 01bb <- 01bb is 443, i.e. the destination port
0x01bb = 1*16*16 + 11*16 + 11 = 443
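The same arithmetic in C, just to make the byte order explicit (a throwaway check of my own, not from the original post):

#include <stdio.h>

int main(void) {
    unsigned char dst_port_bytes[2] = {0x01, 0xbb};   /* the two bytes after 8192 in the dump */
    /* TCP puts the port on the wire in network byte order (big-endian) */
    unsigned int port = (dst_port_bytes[0] << 8) | dst_port_bytes[1];
    printf("%u\n", port);   /* prints 443 */
    return 0;
}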

Version: the IP protocol version. The current version number is 4; the next-generation version is 6.

Header length: the length of the IP header — the fixed part (20 bytes) plus the variable part. The field is 4 bits wide, so its maximum is 1111, decimal 15, meaning the header can be at most 15 32-bit words (4 bytes each), i.e. at most 15*4 = 60 bytes; subtract the 20-byte fixed part and the variable part can be at most 40 bytes.
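A small sketch of pulling those two fields out of the first header byte (0x45 in this capture); the variable names are my own, not from the post:

#include <stdio.h>

int main(void) {
    unsigned char first_byte = 0x45;               /* first byte of the IP header in the dump */
    unsigned int version   = first_byte >> 4;      /* high nibble: 4 -> IPv4 */
    unsigned int ihl_words = first_byte & 0x0f;    /* low nibble: header length in 32-bit words */
    printf("version=%u header=%u bytes\n", version, ihl_words * 4);   /* version=4 header=20 bytes */
    return 0;
}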

Flipping through RFC 8446, the version magic number of a TLS 1.2 Client Hello is 0x0303, and sure enough it is right there in the dump:

struct {
    ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */
    Random random;
    opaque legacy_session_id<0..32>;
    CipherSuite cipher_suites<2..2^16-2>;
    opaque legacy_compression_methods<1..2^8-1>;
    Extension extensions<8..2^16-1>;
} ClientHello;

But what comes back is plaintext HTTP, which is clearly not an ordinary connection failure.

So the connection was cut off based on SNI.
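That also explains curl's "unknown protocol" message: OpenSSL expects the reply to start with a TLS record header (content type 0x16 = handshake), but the first byte here is 'H' from "HTTP/1.1 403 Forbidden". A rough sketch of that check, my own illustration rather than curl's actual code:

#include <stdio.h>

int main(void) {
    unsigned char reply[] = { 'H', 'T', 'T', 'P' };   /* first bytes of the blocked response above */
    if (reply[0] == 0x16)
        printf("looks like a TLS handshake record\n");
    else
        printf("not TLS: got 0x%02x ('%c') - plaintext HTTP, handshake aborted\n",
               reply[0], reply[0]);
    return 0;
}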

In the OCSPStatusRequest, the "ResponderIDs" provides a list of OCSP
responders that the client trusts. A zero-length "responder_id_list"
sequence has the special meaning that the responders are implicitly
known to the server - e.g., by prior arrangement. "Extensions" is a
DER encoding of OCSP request extensions.

Both "ResponderID" and "Extensions" are DER-encoded ASN.1 types as
defined in [OCSP]. "Extensions" is imported from [PKIX]. A zero-
length "request_extensions" value means that there are no extensions
(as opposed to a zero-length ASN.1 SEQUENCE, which is not valid for
the "Extensions" type).

In the case of the "id-pkix-ocsp-nonce" OCSP extension, [OCSP] is
unclear about its encoding; for clarification, the nonce MUST be a
DER-encoded OCTET STRING, which is encapsulated as another OCTET
STRING (note that implementations based on an existing OCSP client
will need to be checked for conformance to this requirement).

Servers that receive a client hello containing the "status_request"
extension, MAY return a suitable certificate status response to the
client along with their certificate. If OCSP is requested, they
SHOULD use the information contained in the extension when selecting
an OCSP responder, and SHOULD include request_extensions in the OCSP
request.
diff --git "a/2019/10/18/lex\345\222\214yacc\344\276\213\345\255\220/index.html" "b/2019/10/18/lex\345\222\214yacc\344\276\213\345\255\220/index.html"

lex and yacc


Lexical analysis

lex is mainly used for lexical analysis — simply put, tokenizing.
Each call to yylex returns one token; Lucene's standard analyzer also tokenizes with a lex-like package.
Once Lucene has the tokens it builds an inverted index from them.
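A minimal driver showing the one-token-per-yylex-call behaviour; this is my own sketch and assumes you link it against the lex.yy.c generated from the example below instead of the main defined there:

#include <stdio.h>

int yylex(void);   /* provided by the scanner that lex/flex generates */

int main(void) {
    int tok;
    while ((tok = yylex()) != 0)          /* yylex returns 0 at end of input */
        printf("got token %d\n", tok);    /* the example below returns 2 for "end" */
    return 0;
}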

A lex example


%{
%}
%%

end { ECHO; return 2; }

aaa { ECHO; }

.|\n {}

%%
int main(){
    yylex();
}
int yywrap(){
    return 1;
}
Then run it through lex (the generated lex.yy.c is then compiled with a C compiler):
lex test.lex

Parsing

What is parsing?

A grammar is a special kind of rule system; put another way, a parser is a more powerful machine than a regular-expression matcher and can express things that regular expressions cannot.

How does a parser choose?
A key question in parsing is how to pick, among several candidate productions, exactly one production.

bison is the GNU version of yacc.
Like flex, a bison file is split into 3 sections separated by %%.
The open-source yacc on Linux is bison.

...definitions section...

%%

...rules section...

%%

...user subroutines section...
The first section mainly holds C declarations plus the token and non-terminal declarations;
the second section holds the productions and their semantic actions;
the third section holds the supporting C functions.

/* Infix notation calculator--calc */

%{
#define YYSTYPE double
#include <math.h>
#include <stdio.h>
%}

/* BISON Declarations */
%token NUM
%left '-' '+'
%left '*' '/'
%left NEG /* negation--unary minus */
%right '^' /* exponentiation */

/* Grammar follows */
%%
input: /* empty string */
| input line
;

line: '\n'
| exp '\n' { printf ("\t%.10g\n", $1); }
;

exp: NUM { $$ = $1; }
| exp '+' exp { $$ = $1 + $3; }
| exp '-' exp { $$ = $1 - $3; }
| exp '*' exp { $$ = $1 * $3; }
| exp '/' exp { $$ = $1 / $3; }
| '-' exp %prec NEG { $$ = -$2; }
| exp '^' exp { $$ = pow ($1, $3); }
| '(' exp ')' { $$ = $2; }
;
%%
#include <ctype.h>
main ()
{
yyparse ();
}
yyerror (s) /* Called by yyparse on error */
char *s;
{
printf ("%s\n", s);
}

yylex ()
{
int c;

/* skip white space */
while ((c = getchar ()) == ' ' || c == '\t')
;
/* process numbers */
if (c == '.' || isdigit (c))
{
ungetc (c, stdin);
scanf ("%lf", &yylval);
return NUM;
}
/* return end-of-file */
if (c == EOF)
return 0;
/* return single chars */
return c;
}
Generate and compile:
bison parse.y
gcc parse.tab.c -lm
# ./a.out
3+2
5
Below is how the commonly used declarations work.

%token

%token goes in the definitions section:
%token NUMBER
which in the generated C file becomes:
#define NUMBER 258 
So %token can be thought of as shorthand that saves you writing the #define yourself.
YYSTYPE

In real parsers, the values of different symbols use different data types, e.g.,
int and double for numeric symbols, char * for strings, and pointers to
structures for higher level symbols. If you have multiple value types, you
have to list all the value types used in a parser so that yacc can create a C
union typedef called YYSTYPE to contain them. (Fortunately, yacc gives
you a lot of help ensuring that you use the right value type for each
symbol .)

— quoted from lex & yacc

YYSTYPE is a macro-defined type whose purpose is to fix the set of value types for terminals and non-terminals.

%union is shorthand for defining YYSTYPE.

%token is shorthand for defining the token enum values.

%type is shorthand for declaring the value type of a non-terminal.

%union {
double dval;
int vblno;
}

%token NUMBER
Use the --defines option to generate the header file:
# bison --defines test.y
which ends up generating a file containing:
enum yytokentype{
NUMBER = 258
};

union YYSTYPE{
double dval;
int vblno;
};
If you attach value types to the tokens:
%token <vblno> NAME
%token <dval> NUMBER
%type <dval> expression
In action code, yacc automatically qualifies symbol value references
with the appropriate field name, e.g., if the third symbol is a NUMBER,
a reference to $3 acts like $3.dval.

— quoted from lex & yacc

In the semantic-action code, if the third symbol is a NUMBER, then $3 is equivalent to $3.dval.

Further reading

diff --git a/2019/10/19/php-try-catch/index.html b/2019/10/19/php-try-catch/index.html

php7 exceptions, errors, and related gotchas


php7 exceptions, errors, and related gotchas

PHP has a great many gotchas: some differ between versions, some come from historical baggage, and some come from knowledge carried over from other languages that behaves differently in PHP.

Background

throwable

PHP 7 changes how most errors are reported by PHP. Instead of reporting errors through the traditional error reporting mechanism used by PHP 5, most errors are now reported by throwing Error exceptions.

(Rough translation of the above: PHP 7 changed how most errors are reported; instead of PHP 5's traditional error reporting mechanism, most errors are now reported by throwing Error exceptions.)

Into the gotchas

Example 1

On PHP 7, the divide-by-zero style error (here, modulo by zero) becomes an exception:

<?php
// test.php
try {
    echo 1%0;
} catch (DivisionByZeroError $e) {
    echo "bbb";
}
?>

Then run it:

php test.php
bbb

It prints bbb — the error was caught by the try/catch.
Now let's see how PHP catches this error.

The backtrace:
Breakpoint 1, zend_throw_exception_ex (exception_ce=0x14cfe70, code=0, format=0x1087ea4 "Modulo by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_exceptions.c:913
913 {
(gdb) bt
#0 zend_throw_exception_ex (exception_ce=0x14cfe70, code=0, format=0x1087ea4 "Modulo by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_exceptions.c:913
#1 0x00000000009b9feb in ZEND_MOD_SPEC_CONST_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:4270
#2 0x0000000000a381e4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59989
#3 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef684300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
#4 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
#5 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
#6 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
#7 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404
Further reading

Example 2

PHP version 7:
<?php
try {
echo 1/0; // modulo changed to division
} catch (DivisionByZeroError $e) {
echo "bbb";
}
?>
Output:
Warning: Division by zero in /home/dinosaur/test/test.php on line 3
INF
Notice the difference?

① A warning was raised and it was not caught by the try/catch.

② The PHP script kept executing (and printed INF).

Let's look at the backtrace:
(gdb) bt
#0 zend_error (type=2, format=0x107dcfc "Division by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1105
#1 0x000000000093fb5b in div_function (result=0x7fffef61e090, op1=0x7fffe70e61c0, op2=0x7fffe70e61d0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:1173
#2 0x00000000009a82a0 in fast_div_function (result=0x7fffef61e090, op1=0x7fffe70e61c0, op2=0x7fffe70e61d0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:738
#3 0x00000000009b9f22 in ZEND_DIV_SPEC_CONST_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:4251
#4 0x0000000000a381d4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59986
#5 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef684300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
#6 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
#7 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
#8 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
#9 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404


Dig all the way down inside zend_error and you end up at the write system call.
if (Z_LVAL_P(op2) == 0) {
zend_error(E_WARNING, "Division by zero");
ZVAL_DOUBLE(result, ((double) Z_LVAL_P(op1) / (double) Z_LVAL_P(op2)));
return SUCCESS;
}
After zend_error it simply returns, so the rest of the script keeps executing.

Comparison and summary

1/0 does not throw an exception; it raises a warning and execution continues.

The gotchas:

  • Not every error can be caught.
  • When nothing is caught, the script keeps running.
diff --git "a/2019/10/22/composer\347\232\204psr4/index.html" "b/2019/10/22/composer\347\232\204psr4/index.html"

Composer's PSR-4


Composer's PSR-4

This post is mainly about Composer's PSR-4 autoloading. PHP carries a lot of historical baggage and has had to make many compromises; namespaces and autoloading are among them.

The big include/require pitfall

Example

The difference between include and require may still be somebody's interview question, but both share one fatal pitfall: when you include or require a relative path, it is resolved against the working directory.

For example.

We are currently in the directory that contains index.php:
# tree

test
   ├── index.php
   ├── relative.php
   └── subdir
   ├── a.php
   └── relative.php

index.php is trivial — it just includes a path:
<?php
include "./subdir/a.php";
The two relative.php files each print their own path.
subdir/relative.php:
<?php
echo "test/subdir/relative.php"
relative.php:
<?php
echo "test/relative.php";
So when we run it from index.php's own directory, which relative.php gets included?

The answer:
# php index.php 
test/relative.php
It included the relative.php that sits in the same directory as index.php.

And if you run it from one level up — the directory that contains test/ — it even errors out:
php test/index.php 
PHP Warning: include(./subdir/a.php): failed to open stream: No such file or directory in /root/test/index.php on line 2
PHP Warning: include(): Failed opening './subdir/a.php' for inclusion (include_path='.:/usr/share/php') in /root/test/index.php on line 2
All of this is because, when the path is relative, getcwd() is called to obtain the working directory — the same directory you see with pwd in a shell.

Because of this rather nasty behaviour, PHP code that hand-writes include with relative paths becomes very hard to maintain afterwards. So we should avoid relative include paths as much as possible: once you write one, sooner or later someone will copy-and-paste your code, include line and all, and that is the start of the next mess.

Autoloading mitigates this pitfall because it reduces the need for hand-written relative includes; when people do include by hand they tend to write include __DIR__ . 'aaa/bbb/ccc.php', which is not a relative path and is much safer.

How CLI and CGI/FastCGI working directories differ

The CLI SAPI does not change the current directory to the directory of the script being run.

The following example shows the difference between this module and the CGI SAPI:
<?php
// simple test program named test.php
echo getcwd(), "\n";
?>
When run through the CGI version, the output is:
$ pwd
/tmp

$ php-cgi -f another_directory/test.php
/tmp/another_directory
You can clearly see that PHP changed the current directory to the directory of the script that was just run.

Using the CLI SAPI instead, you get:
$ pwd
/tmp

$ php -q another_directory/test.php
/tmp
The include/require opcode and getcwd

After lexing and parsing, require and include compile down to opcode 73, ZEND_INCLUDE_OR_EVAL. When the included path is relative,

it ends up calling
VCWD_GETCWD(cwd, MAXPATHLEN)
which in the end is just glibc's getcwd.
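For reference, this is what the userspace side looks like — a tiny sketch of my own calling the same glibc getcwd that VCWD_GETCWD expands to:

#include <stdio.h>
#include <unistd.h>
#include <limits.h>

int main(void) {
    char cwd[PATH_MAX];
    if (getcwd(cwd, sizeof(cwd)) != NULL)   /* the value a relative include is resolved against */
        printf("cwd = %s\n", cwd);
    else
        perror("getcwd");
    return 0;
}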

The getcwd system call

Every process's task_struct has an fs_struct, and that structure holds pwd and root. When you call getcwd(), glibc issues the system call, which reads the pwd field of fs_struct and returns it.
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
struct path *pwd)
{
...
*root = fs->root;
*pwd = fs->pwd;
...
}
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
int error;
struct path pwd, root;
char *page = __getname();

if (!page)
return -ENOMEM;

rcu_read_lock();
get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); // each process has an fs_struct; its root and pwd fields describe the root and working directories

char *cwd = page + PATH_MAX;
int buflen = PATH_MAX;

prepend(&cwd, &buflen, "\0", 1);
error = prepend_path(&pwd, &root, &cwd, &buflen);
...
copy_to_user(buf, cwd, len) // copy the assembled pwd back to user space
...

}
Summary of include and require

When include or require is given a relative path, the path is resolved against getcwd(), i.e. the current working directory.

And CGI and CLI behave differently:

  • In CLI mode the current directory is whatever the shell's pwd says.
  • The CGI SAPI, unlike the CLI SAPI, switches the working directory once — to the directory of the initially executed PHP file.

Namespaces

What is a namespace?

Really just a pile of qualifiers.

Why have namespaces at all?
Because we want to reuse other people's code. You pull in someone's library, they wrote a hello function, and you also wrote a hello function — trouble. So namespaces were introduced: as long as everyone's namespace differs, identical function names no longer collide.

Autoloading

Now, on to autoloading. What is autoloading?

It is simply dynamic include — include at runtime.

How do we normally include files?

By hand-including a pile of files, like the example above. That carries at least two risks:

  • A newcomer writes a relative include path.
  • Everything has to be pulled in by hand, and include will happily include the same file twice, so you need include_once or require_once.

As risks go, the relative-path one is by far the more dangerous; duplicate inclusion only costs a little extra checking and performance.

spl_autoload_register

The spl_autoload_* functions are the core of PHP autoloading, and autoloading is implemented on top of spl_autoload_register:

/* {{{ proto bool spl_autoload_register([mixed autoload_function [, bool throw [, bool prepend]]])
Register given function as __autoload() implementation */
PHP_FUNCTION(spl_autoload_register)
{

...

if (zend_hash_add_mem(SPL_G(autoload_functions), lc_name, &alfi, sizeof(autoload_func_info)) == NULL) {
...
}
...
} /* }}} */
Later, the relevant call sites use zend_hash_exists(EG(class_table), lc_name) to check whether the class is already in the global EG(class_table).
spl_autoload_call below is one example:
PHP_FUNCTION(spl_autoload_call)
{

if (SPL_G(autoload_functions)) { // SPL_G(autoload_functions) is what spl_autoload_register filled in
int l_autoload_running = SPL_G(autoload_running);
SPL_G(autoload_running) = 1;
lc_name = zend_string_alloc(Z_STRLEN_P(class_name), 0);
zend_str_tolower_copy(ZSTR_VAL(lc_name), Z_STRVAL_P(class_name), Z_STRLEN_P(class_name));
zend_hash_internal_pointer_reset_ex(SPL_G(autoload_functions), &pos);
while (zend_hash_get_current_key_ex(SPL_G(autoload_functions), &func_name, &num_idx, &pos) == HASH_KEY_IS_STRING) { // iterate over the registered callbacks
alfi = zend_hash_get_current_data_ptr_ex(SPL_G(autoload_functions), &pos);
zend_call_method(Z_ISUNDEF(alfi->obj)? NULL : &alfi->obj, alfi->ce, &alfi->func_ptr, ZSTR_VAL(func_name), ZSTR_LEN(func_name), retval, 1, class_name, NULL); // invoke the registered callback

if (zend_hash_exists(EG(class_table), lc_name)) { // the callback found the class, so break out of the loop

break;
}
zend_hash_move_forward_ex(SPL_G(autoload_functions), &pos);
}
...
}
..
} /* }}} */
The autoload flow is actually quite simple.
An autoloading example:
<?php
// test.php
spl_autoload_register(function ($class) {
include "$class" . '.php';
});
$obj = new ClassA();

and the class file ClassA.php:
<?php
class ClassA{}
Here is the backtrace:
(gdb) bt
#0 zif_spl_autoload_call (execute_data=0x7fffef61e0a0, return_value=0x7fffffffa2f0) at /home/dinosaur/Downloads/php-7.2.2/ext/spl/php_spl.c:393
#1 0x0000000000932807 in zend_call_function (fci=0x7fffffffa330, fci_cache=0x7fffffffa300) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:833
#2 0x0000000000933000 in zend_lookup_class_ex (name=0x7fffe6920b58, key=0x7fffe70e63f0, use_autoload=1) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:990
#3 0x0000000000933dbd in zend_fetch_class_by_name (class_name=0x7fffe6920b58, key=0x7fffe70e63f0, fetch_type=512) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:1425
#4 0x00000000009b7e46 in ZEND_NEW_SPEC_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:3211
#5 0x0000000000a380a4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59929
#6 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef683300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
#7 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
#8 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffcaa0) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
#9 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441a60) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
#10 0x0000000000a40ee0 in main (argc=2, argv=0x1441a60) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404

So the core flow of autoloading is: while looking up a class, spl_autoload_call is invoked; it calls the registered autoload callbacks one by one, returning as soon as one of them has loaded the class, or giving up once every callback has been tried without success.

The PSR standards and PSR-4

PSR stands for PHP Standards Recommendations; PSR-4 and PSR-0 are both about autoloading.

At heart it just specifies a simple substitution (this row is from the PSR-4 examples):

fully qualified class name: \Aura\Web\Response\Status
namespace prefix:           Aura\Web
base directory:             /path/to/aura-web/src/
resulting file path:        /path/to/aura-web/src/Response/Status.php

PSR-4 specifies how to load a file: the namespace prefix of the fully qualified name is replaced by a base directory, and the rest of the name maps to the file underneath it.
For example, say the class you want to load is:
\Aura\Web\Response\Status
Then if you map Aura\Web to /path/to/aura-web/src/, the class \Aura\Web\Response\Status will be looked up in /path/to/aura-web/src/Response/Status.php.

You could say it is a bit like nginx location routing. Here is an nginx config:
location ^~ /images/ {
    # match any request that starts with /images/ and stop searching; regex locations are not tested.
}
Then the PSR-4 mapping for \Aura\Web\Response\Status above looks a bit like this:
location ^~ /Aura/Web/ {
    root /path/to/aura-web/src/;
}
Further reading

diff --git "a/2019/10/31/ast\346\236\204\351\200\240/index.html" "b/2019/10/31/ast\346\236\204\351\200\240/index.html"

AST construction


Abstract Syntax Tree

The previous post gave a brief introduction to lex and yacc.

This one is mainly about constructing an abstract syntax tree.

For example, building a tree for 1+2+3.

diff --git "a/2019/11/14/java\347\232\204package\344\270\216\346\226\207\344\273\266\350\267\257\345\276\204/index.html" "b/2019/11/14/java\347\232\204package\344\270\216\346\226\207\344\273\266\350\267\257\345\276\204/index.html"

Java packages, file paths, and compilation


Having written plain PHP for a long time, my fear of pitfalls like PHP's relative paths and autoloading is bone-deep.
Java and Go have similar issues, and since I have not written Java for long, let me note this down first.

On the relationship between class names and the classpath, Oracle's documentation says:


Class Path and Package Names


Java classes are organized into packages that are mapped to directories in the file system. But, unlike the file system, whenever you specify a package name, you specify the whole package name and never part of it. For example, the package name for java.awt.Button is always specified as java.awt.


For example, suppose you want the JRE to find a class named Cool.class in the package utility.myapp. If the path to that directory is /java/MyClasses/utility/myapp, then you would set the class path so that it contains /java/MyClasses. To run that application, you could use the following java command:


java -classpath /java/MyClasses utility.myapp.Cool
When the application runs, the JVM uses the class path settings to find any other classes defined in the utility.myapp package that are used by the Cool class.


The entire package name is specified in the command. It is not possible, for example, to set the class path so it contains /java/MyClasses/utility and use the command java myapp.Cool. The class would not be found.


You might wonder what defines the package name for a class. The answer is that the package name is part of the class and cannot be modified, except by recompiling the class.

First, compilation.
Look at javac's help:
javac --help
Usage: javac <options> <source files>
javac's arguments are: javac plus file paths.

For example, we are currently one level above the com directory.

Here is the code of HelloWorld.java:

package com.helloworld;

public class HelloWorld
{
static public int m = 1;
public int i = 1;

}
# ls 
com
# tree
.
└── com
└── helloworld
└── HelloWorld.java

How do I compile the file under com/helloworld/HelloWorld.java?

Compiling like this is enough:
javac com/helloworld/*.java
Then look at the directory tree again — a class file has appeared underneath:
# tree
.
└── com
└── helloworld
├── HelloWorld.class
└── HelloWorld.java

Starting over, let's see what the -d option does:
# mkdir classes
# tree
.
├── classess
└── com
└── helloworld
└── HelloWorld.java
After compiling, the corresponding directories are created inside the directory given to -d:
# javac -d ./classes/    com/helloworld/*.java
# tree
.
├── classes
│   └── com
│   └── helloworld
│   └── HelloWorld.class
├── classess
└── com
└── helloworld
└── HelloWorld.java
Further reading

diff --git a/2019/11/21/composer-ext/index.html b/2019/11/21/composer-ext/index.html

composer-ext


PhpStorm often warns about a missing ext-json, and I only recently realized that composer.json can declare checks for required extensions.

For example:
"require": {
"php": ">=5.4.0",
"topthink/framework": "^5.0",
"php-imap/php-imap": "~2.0",
"phpoffice/phpspreadsheet": "^1.3",
"hprose/hprose": "^2.0",
"ext-json": "*" // 这就是解析require json 扩展
},
This checks whether the json extension is present. How does Composer implement it? It uses the extension_loaded function to look at the extension.

The implementation is at line 129 in Composer, where the extension is obtained through extension_loaded.

Further reading

diff --git "a/2019/11/25/java-string-\347\233\270\345\205\263\345\206\205\345\256\271/index.html" "b/2019/11/25/java-string-\347\233\270\345\205\263\345\206\205\345\256\271/index.html"

Java string notes


The version here is Java 8.

Primitive types and objects

What is a Java String?
Obviously an object.

The specialized +

15.18.1. String Concatenation Operator +
If only one operand expression is of type String, then string conversion (§5.1.11) is performed on the other operand to produce a string at run time.
(source)

Java's string concatenation operator is +, while PHP's is .

StringBuilder

String constant folding

As the JLS 8 section on the String Concatenation Operator + quoted above says, if only one operand is of type String, the conversion happens at run time. It does not spell out what to do when both operands are string literals, so javac simply folds them:
/** If tree is a concatenation of string literals, replace it
* by a single literal representing the concatenated string.
*/
protected JCExpression foldStrings(JCExpression tree) {
if (!allowStringFolding)
return tree;
ListBuffer<JCExpression> opStack = new ListBuffer<>();
ListBuffer<JCLiteral> litBuf = new ListBuffer<>();
boolean needsFolding = false;
JCExpression curr = tree;
while (true) {
if (curr.hasTag(JCTree.Tag.PLUS)) {
JCBinary op = (JCBinary)curr;
needsFolding |= foldIfNeeded(op.rhs, litBuf, opStack, false);
curr = op.lhs;
} else {
needsFolding |= foldIfNeeded(curr, litBuf, opStack, true);
break; //last one!
}
}
if (needsFolding) {
List<JCExpression> ops = opStack.toList();
JCExpression res = ops.head;
for (JCExpression op : ops.tail) {
res = F.at(op.getStartPosition()).Binary(optag(TokenKind.PLUS), res, op);
storeEnd(res, getEndPos(op));
}
return res;
} else {
return tree;
}
[foldStrings] https://github.com/openjdk/jdk/blob/6bab0f539fba8fb441697846347597b4a0ade428/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java#L950

Further reading

diff --git "a/2019/11/26/docker\344\270\216iptable/index.html" "b/2019/11/26/docker\344\270\216iptable/index.html"

Docker, iptables, and bridges


How to create a bridge

You can create a bridge with brctl from the bridge-utils package:
$sudo brctl addbr br0
Then brctl show lists the bridges:
$brctl  show
bridge name bridge id STP enabled interfaces
br0 8000.000000000000 no

Looking at the system calls with strace:
$sudo strace  brctl addbr br1
Output:
ubuntu@VM-0-3-ubuntu:~/libnlbuild/bin$ sudo strace  brctl addbr br1
...
socket(AF_UNIX, SOCK_STREAM, 0) = 3
ioctl(3, SIOCBRADDBR, "br1") = 0
+++ exited with 0 +++
We can see the call:
ioctl(3, SIOCBRADDBR, "br1") 
The 3 is the file descriptor that was opened: 0, 1 and 2 are the special stdin/stdout/stderr descriptors, so the next file opened gets 3.

A small bridge-creating example I wrote:
//  bradd.c
#include <linux/sockios.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <stdio.h>
int main(){
int br_socket_fd,ret;
if((br_socket_fd = socket(AF_LOCAL, SOCK_STREAM, 0)) < 0){
perror("Error: ");
}
if((ret = ioctl(br_socket_fd, SIOCBRADDBR, "hello")) < 0) // SIOCBRADDBR comes from sockios.h
{
perror("ioctl error");
}
return 0;
}
$gcc bradd.c -o bradd
## adding a bridge requires sudo
$sudo ./bradd
Then brctl show confirms that a bridge named hello was created:
$ brctl show
bridge name bridge id STP enabled interfaces
docker0 8000.024273119fd1 no vethe6cf6a0
hello 8000.000000000000 no
Then we notice that docker0 and hello differ by one column — interfaces. How do we attach a veth device?

  • In brctl you can use brctl addif, which boils down to:
    int br_add_interface(const char *bridge, const char *dev)
    {
    struct ifreq ifr;
    ...
    int ifindex = if_nametoindex(dev);
    ...
    strncpy(ifr.ifr_name, bridge, IFNAMSIZ);
    ifr.ifr_ifindex = ifindex;
    err = ioctl(br_socket_fd, SIOCBRADDIF, &ifr);
    ...
    }
    which finally calls into the kernel's net/bridge/br_if.c:

// dev is the device we want to add
// br is our bridge
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net_bridge_port *p;
int err = 0;
unsigned br_hr, dev_hr;
bool changed_addr;

...
p = new_nbp(br, dev);
if (IS_ERR(p))
return PTR_ERR(p);

call_netdevice_notifiers(NETDEV_JOIN, dev);

err = dev_set_allmulti(dev, 1);
if (err) {
kfree(p); /* kobject not yet init'd, manually free */
goto err1;
}

err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
goto err2;

err = br_sysfs_addif(p);
if (err)
goto err2;

err = br_netpoll_enable(p);
if (err)
goto err3;

err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
goto err4;

dev->priv_flags |= IFF_BRIDGE_PORT;

err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
if (err)
goto err5;

err = nbp_switchdev_mark_set(p);
if (err)
goto err6;

dev_disable_lro(dev);

list_add_rcu(&p->list, &br->port_list);

nbp_update_port_count(br);

netdev_update_features(br->dev);

br_hr = br->dev->needed_headroom;
dev_hr = netdev_get_fwd_headroom(dev);
if (br_hr < dev_hr)
update_headroom(br, dev_hr);
else
netdev_set_rx_headroom(dev, br_hr);

if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");

if (br->dev->addr_assign_type != NET_ADDR_SET) {
/* Ask for permission to use this MAC address now, even if we
* don't end up choosing it below.
*/
err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
if (err)
goto err7;
}

err = nbp_vlan_init(p, extack);
if (err) {
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
goto err7;
}

spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);

if (netif_running(dev) && netif_oper_up(dev) &&
(br->dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);

br_ifinfo_notify(RTM_NEWLINK, NULL, p);

if (changed_addr)
call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

br_mtu_auto_adjust(br);
br_set_gso_limits(br);

kobject_uevent(&p->kobj, KOBJ_ADD);

return 0;
...
}
Adding virtual devices:
# strace  ip link add vethaaa type veth peer name vethbbb
execve("/sbin/ip", ["ip", "link", "add", "vethaaa", "type", "veth", "peer", "name", "vethbbb"], 0x7ffed8af30f0 /* 23 vars */)
...
socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC, NETLINK_ROUTE) = 3
setsockopt(3, SOL_SOCKET, SO_SNDBUF, [32768], 4) = 0
setsockopt(3, SOL_SOCKET, SO_RCVBUF, [1048576], 4) = 0
setsockopt(3, SOL_NETLINK, NETLINK_EXT_ACK, [1], 4) = 0
bind(3, {sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, 12) = 0
getsockname(3, {sa_family=AF_NETLINK, nl_pid=26226, nl_groups=00000000}, [12]) = 0
sendto(3, {{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}, 32, 0, NULL, 0) = 32
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=52, type=NLMSG_ERROR, flags=0, seq=0, pid=26226}, {error=-ENODEV, msg={{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}}}, iov_len=16384}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 52
access("/proc/net", R_OK) = 0
access("/proc/net/unix", R_OK) = 0
socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0) = 4
ioctl(4, SIOCGIFINDEX, {ifr_name="vethaaa"}) = -1 ENODEV (No such device)
close(4) = 0
brk(NULL) = 0x560e12455000
brk(0x560e12476000) = 0x560e12476000
openat(AT_FDCWD, "/usr/lib/ip/link_veth.so", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
sendmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=92, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576836139, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=12, nla_type=IFLA_IFNAME}, "vethaaa"}, {{nla_len=48, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=36, nla_type=IFLA_INFO_DATA}, "\x20\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x03\x00\x76\x65\x74\x68\x62\x62\x62\x00"}]}]}, iov_len=92}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 92
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 36
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=36, type=NLMSG_ERROR, flags=NLM_F_CAPPED, seq=1576836139, pid=26226}, {error=0, msg={len=92, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576836139, pid=0}}}, iov_len=36}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 36

socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC, NETLINK_ROUTE) = 3
setsockopt(3, SOL_SOCKET, SO_SNDBUF, [32768], 4) = 0
setsockopt(3, SOL_SOCKET, SO_RCVBUF, [1048576], 4) = 0
setsockopt(3, SOL_NETLINK, NETLINK_EXT_ACK, [1], 4) = 0
bind(3, {sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, 12) = 0
getsockname(3, {sa_family=AF_NETLINK, nl_pid=18263, nl_groups=00000000}, [12]) = 0
sendto(3, {{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}, 32, 0, NULL, 0) = 32
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=52, type=NLMSG_ERROR, flags=0, seq=0, pid=18263}, {error=-EPERM, msg={{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}}}, iov_len=16384}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 52
access("/proc/net", R_OK) = 0
access("/proc/net/unix", R_OK) = 0
socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0) = 4
ioctl(4, SIOCGIFINDEX, {ifr_name="p1"}) = -1 ENODEV (No such device)
close(4) = 0
brk(NULL) = 0x5595d01bb000
brk(0x5595d01dc000) = 0x5595d01dc000
openat(AT_FDCWD, "/usr/lib/ip/link_veth.so", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
sendmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=84, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576748752, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=7, nla_type=IFLA_IFNAME}, "p1"}, {{nla_len=44, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=32, nla_type=IFLA_INFO_DATA}, "\x1c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x03\x00\x70\x32\x00\x00"}]}]}, iov_len=84}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 84
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 104
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=104, type=NLMSG_ERROR, flags=0, seq=1576748752, pid=18263}, {error=-EPERM, msg={{len=84, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576748752, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=7, nla_type=IFLA_IFNAME}, "p1"}, {{nla_len=44, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=32, nla_type=IFLA_INFO_DATA}, "\x1c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x03\x00\x70\x32\x00\x00"}]}]}}}, iov_len=104}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 104
write(2, "RTNETLINK answers: Operation not"..., 43RTNETLINK answers: Operation not permitted
) = 43
exit_group(2) = ?
+++ exited with 2 +++

The kernel side of netlink and veth:
// drivers\net\veth.c
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
.priv_size = sizeof(struct veth_priv),
.setup = veth_setup,
.validate = veth_validate,
.newlink = veth_newlink,
.dellink = veth_dellink,
.policy = veth_policy,
.maxtype = VETH_INFO_MAX,
.get_link_net = veth_get_link_net,
};
E:\linux-master\net\netlink\af_netlink.c
static const struct proto_ops netlink_ops = {
.family = PF_NETLINK,
.owner = THIS_MODULE,
.release = netlink_release,
.bind = netlink_bind,
.connect = netlink_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = netlink_getname,
.poll = datagram_poll,
.ioctl = netlink_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = netlink_setsockopt,
.getsockopt = netlink_getsockopt,
.sendmsg = netlink_sendmsg,
.recvmsg = netlink_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
Adding the veth device

First, the socket is created:
(gdb) bt
#0 socket () at ../sysdeps/unix/syscall-template.S:78
#1 0x00005555555b60c7 in rtnl_open_byproto (rth=0x5555557d8020 <rth>, subscriptions=0, protocol=<optimized out>) at libnetlink.c:194
#2 0x000055555555f956 in main (argc=9, argv=0x7fffffffe548) at ip.c:308
Breakpoint 6, __libc_sendmsg (fd=3, msg=msg@entry=0x7fffffffdd70, flags=flags@entry=0) at ../sysdeps/unix/sysv/linux/sendmsg.c:28
28 ../sysdeps/unix/sysv/linux/sendmsg.c: No such file or directory.
(gdb) bt
#0 __libc_sendmsg (fd=3, msg=msg@entry=0x7fffffffdd70, flags=flags@entry=0) at ../sysdeps/unix/sysv/linux/sendmsg.c:28
#1 0x00005555555b5c8f in __rtnl_talk_iov (rtnl=0x5555557d8020 <rth>, iov=iov@entry=0x7fffffffddf0, iovlen=iovlen@entry=1, answer=answer@entry=0x0, show_rtnl_err=show_rtnl_err@entry=true,
errfn=0x0) at libnetlink.c:887
#2 0x00005555555b7225 in __rtnl_talk (errfn=0x0, show_rtnl_err=true, answer=<optimized out>, n=0x7fffffffde40, rtnl=<optimized out>) at libnetlink.c:1000
#3 rtnl_talk (rtnl=<optimized out>, n=n@entry=0x7fffffffde40, answer=answer@entry=0x0) at libnetlink.c:1006
#4 0x000055555557bc6e in iplink_modify (cmd=cmd@entry=16, flags=flags@entry=1536, argc=3, argc@entry=6, argv=<optimized out>, argv@entry=0x7fffffffe560) at iplink.c:1084
#5 0x000055555557c0c6 in do_iplink (argc=7, argv=0x7fffffffe558) at iplink.c:1641
#6 0x000055555555ff0c in do_cmd (argv0=0x7fffffffe7d8 "link", argc=8, argv=0x7fffffffe550) at ip.c:113
#7 0x000055555555f9a0 in main (argc=9, argv=0x7fffffffe548) at ip.c:317
For example, with the command ip link add veth_0 type veth peer name veth_0_peer,
req.n starts out with a length of 32:
 p req.n.nlmsg_len 
$1 = 32
After ret = iplink_parse(argc, argv, &req, &type); it becomes 44:
(gdb) p ((char *)n)[32]@64
$50 = "\v\000\003\000veth_0\000\000\064\000\022\000\b\000\001\000veth(\000\002\000$\000\001", '\000' <repeats 17 times>, "\020\000\003\000veth_0_peer"
What is iptables?
# type iptables
iptables is hashed (/sbin/iptables)
So why can the iptables command do all of that?

How iptables works

iptables talks to the kernel over a special socket interface and changes the rules attached to the hooks of the netfilter subsystem.

Source code
Related reading

Further reading

diff --git "a/2019/11/26/java-\345\274\202\345\270\270/index.html" "b/2019/11/26/java-\345\274\202\345\270\270/index.html"

Java exceptions


An exception example
import java.rmi.RemoteException;
public class className
{
public void deposit(double amount) throws RemoteException
{
// Method implementation
throw new RemoteException();
}
//Remainder of class definition
}
+ +

异常的实质是什么?

实质就是一个获取堆栈的类,这个类特别的地方在于可以获取堆栈,核心也在于获取堆栈和捕获异常

checked exception

(source)

The unchecked exception classes are the run-time exception classes and the error classes.


The checked exception classes are all exception classes other than the unchecked exception classes. That is, the checked exception classes are Throwable and all its subclasses other than RuntimeException and its subclasses and Error and its subclasses.

Unchecked exceptions are the runtime exception classes and the error classes; everything else is a checked exception.

diff --git a/2019/11/27/php-tokenlizer/index.html b/2019/11/27/php-tokenlizer/index.html

PHP tokenizer and php-cs-fixer


Tokenizer

The code:
<?php
$code = '<?php echo "string1"."string2"; >';

$tokens = token_get_all($code);

foreach ($tokens as $token) {

if (is_array($token)) {

// line number, token name, and the corresponding text

printf("%d - %s\t%s\n", $token[2], token_name($token[0]), $token[1]);

}

}
Output:
1 - T_OPEN_TAG  <?php
1 - T_ECHO echo
1 - T_WHITESPACE
1 - T_CONSTANT_ENCAPSED_STRING "string1"
1 - T_CONSTANT_ENCAPSED_STRING "string2"
1 - T_WHITESPACE
php-cs-fixer

The core function of php-cs-fixer is token_get_all:
$tokens = \defined('TOKEN_PARSE')
? token_get_all($code, TOKEN_PARSE)
: token_get_all($code);
The core call stack:
#0 E:\PHP-CS-Fixer\src\Tokenizer\Tokens.php(222): PhpCsFixer\Tokenizer\Tokens->setCode('<?php\n\n/*\n * Th...')
#1 E:\PHP-CS-Fixer\src\Runner\Runner.php(171): PhpCsFixer\Tokenizer\Tokens::fromCode('<?php\n\n/*\n * Th...')
#2 E:\PHP-CS-Fixer\src\Runner\Runner.php(132): PhpCsFixer\Runner\Runner->fixFile(Object(SplFileInfo), Object(PhpCsFixer\Linter\ProcessLintingResult))
#3 E:\PHP-CS-Fixer\src\Console\Command\FixCommand.php(219): PhpCsFixer\Runner\Runner->fix()
#4 E:\PHP-CS-Fixer\vendor\symfony\console\Command\Command.php(255): PhpCsFixer\Console\Command\FixCommand->execute(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#5 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(982): Symfony\Component\Console\Command\Command->run(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#6 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(255): Symfony\Component\Console\Application->doRunCommand(Object(PhpCsFixer\Console\Command\FixCommand), Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#7 E:\PHP-CS-Fixer\src\Console\Application.php(84): Symfony\Component\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#8 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(148): PhpCsFixer\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#9 E:\PHP-CS-Fixer\php-cs-fixer(101): Symfony\Component\Console\Application->run()
#10 {main}
As an example, here is the stack for one fixer:
#0 E:\PHP-CS-Fixer\src\Fixer\Operator\BinaryOperatorSpacesFixer.php(339): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->fixWhiteSpaceAroundOperatorToSingleSpace(Object(PhpCsFixer\Tokenizer\Tokens), 19)
#1 E:\PHP-CS-Fixer\src\Fixer\Operator\BinaryOperatorSpacesFixer.php(256): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->fixWhiteSpaceAroundOperator(Object(PhpCsFixer\Tokenizer\Tokens), 19)
#2 E:\PHP-CS-Fixer\src\AbstractFixer.php(75): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->applyFix(Object(SplFileInfo), Object(PhpCsFixer\Tokenizer\Tokens))
#3 E:\PHP-CS-Fixer\src\Runner\Runner.php(192): PhpCsFixer\AbstractFixer->fix(Object(SplFileInfo), Object(PhpCsFixer\Tokenizer\Tokens))
#4 E:\PHP-CS-Fixer\src\Runner\Runner.php(132): PhpCsFixer\Runner\Runner->fixFile(Object(SplFileInfo), Object(PhpCsFixer\Linter\ProcessLintingResult))
#5 E:\PHP-CS-Fixer\src\Console\Command\FixCommand.php(219): PhpCsFixer\Runner\Runner->fix()
#6 E:\PHP-CS-Fixer\vendor\symfony\console\Command\Command.php(255): PhpCsFixer\Console\Command\FixCommand->execute(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#7 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(982): Symfony\Component\Console\Command\Command->run(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#8 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(255): Symfony\Component\Console\Application->doRunCommand(Object(PhpCsFixer\Console\Command\FixCommand), Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#9 E:\PHP-CS-Fixer\src\Console\Application.php(84): Symfony\Component\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#10 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(148): PhpCsFixer\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
#11 E:\PHP-CS-Fixer\php-cs-fixer(101): Symfony\Component\Console\Application->run()
#12 {main}
The core of the fix is inserting tokens after the target position.
// todo

diff --git a/2019/11/28/mysql-explain-impossible-condition/index.html b/2019/11/28/mysql-explain-impossible-condition/index.html

mysql explain impossible condition


I was curious how EXPLAIN manages to report so much, so when EXPLAIN printed Impossible ON condition I wanted to know where that came from.

diff --git "a/2019/11/30/\346\255\243\345\210\231\346\250\241\345\274\217/index.html" "b/2019/11/30/\346\255\243\345\210\231\346\250\241\345\274\217/index.html"
diff --git "a/2019/12/02/ik\345\210\206\350\257\215/index.html" "b/2019/12/02/ik\345\210\206\350\257\215/index.html"

The IK tokenizer


Installing the IK analysis plugin on Elasticsearch:
./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.4.0/elasticsearch-analysis-ik-7.4.0.zip
It returns:
-> Downloading https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.4.0/elasticsearch-analysis-ik-7.4.0.zip
+ + +

分词核心函数

+

相关阅读

+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2019/12/04/stop/index.html b/2019/12/04/stop/index.html new file mode 100644 index 0000000000..6f06ea5be3 --- /dev/null +++ b/2019/12/04/stop/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + docker stop | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ docker stop +

+ + +
+ + + + +
+ + +

docker stop is essentially just signalling: it first sends SIGTERM to the container's main process, and if the process has not exited after the grace period (10 seconds by default) it follows up with SIGKILL (the kill -9 signal). The exact behaviour still needs a look at the code.
// todo

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/05/mysql-\351\232\220\345\274\217\350\275\254\346\215\242/index.html" "b/2019/12/05/mysql-\351\232\220\345\274\217\350\275\254\346\215\242/index.html" new file mode 100644 index 0000000000..9f685b5afe --- /dev/null +++ "b/2019/12/05/mysql-\351\232\220\345\274\217\350\275\254\346\215\242/index.html" @@ -0,0 +1,521 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql 隐式转换 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mysql 隐式转换 +

+ + +
+ + + + +
+ + +

类型系统

type and program language 这本书介绍了类型系统. 什么是类型系统呢?

+

为什么会有隐式转换

我写了很久弱类型语言,一直遇到各种隐式转换

+

但是最近(2020/04/26)我大概知道隐式转换的本质了 .
说到底,弱类型语言也是有类型的语言,变量是有类型的.变量就是一个类型集合里面的一个元素

+

举个例子
比如一个集合颜色{red,yellow,white,blue}
不同类型的变量说到底也不是一个集合的内容,是没法直接比较.那么编译器就帮你做了一个映射,映射成一个类型,然后可以比较了,就那么简单.

+

那么隐式转换的问题是什么呢?其实是开发人员可能没有注意到发生了隐式转换,执行路径和预期不一致

+

那么隐式转换的好处是什么呢?可以少写很多代码,可以更快

+

这本书讲了表达式和求值

+

sql也是一种弱类型语言,所以也有弱类型的大坑隐式转换
mysql的类型系统有人详细描述过吗?或者有相关的文档来说明吗?就像jls一样,可能是我没有看完完整的mysql文档吧

+

mysql类型

mysql类型分为以下几种:

+
    +
  • numeric
  • +
  • date and time
  • +
  • string
  • +
  • json
  • +
+

例子

1
select count(case when number_col='' OR number_col IS NULL THEN 1 END) FROM test;
+

假如number_col列是数字类型(比如int),则会发生隐式转换 number_col = ''里面,空字符串''会转换成 0
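A quick sketch of that conversion, runnable directly in a MySQL client:

```sql
SELECT '' = 0;              -- 1: the empty string is implicitly cast to the number 0
SELECT CAST('' AS SIGNED);  -- 0, with a "Truncated incorrect INTEGER value" warning
SHOW WARNINGS;              -- shows the truncation warning from the cast above
```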

+

隐式转换在什么时候发生?

相关sql

+
1
select 1='222';
+ +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
Thread 28 "mysqld" hit Breakpoint 1, my_strtod (str=0x7f3a500061d0 "222", end=0x7f3ad4d46998, error=0x7f3ad4d469bc) at /home/dinosaur/Downloads/mysql-5.7.21/strings/dtoa.c:465
465 {
(gdb) bt
#0 my_strtod (str=0x7f3a500061d0 "222", end=0x7f3ad4d46998, error=0x7f3ad4d469bc) at /home/dinosaur/Downloads/mysql-5.7.21/strings/dtoa.c:465
#1 0x0000000001f7279d in my_strntod_8bit (cs=0x2e8ea60 <my_charset_utf8_general_ci>, str=0x7f3a500061d0 "222", length=3, end=0x7f3ad4d46998, err=0x7f3ad4d469bc)
at /home/dinosaur/Downloads/mysql-5.7.21/strings/ctype-simple.c:741
#2 0x0000000000fdaaf2 in double_from_string_with_check (cs=0x2e8ea60 <my_charset_utf8_general_ci>, cptr=0x7f3a500061d0 "222", end=0x7f3a500061d3 "")
at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:3577
#3 0x0000000000fdacc5 in Item_string::val_real (this=0x7f3a500061d8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:3594
#4 0x0000000000f9e9b9 in Item::val_result (this=0x7f3a500061d8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.h:1592
#5 0x0000000000fedf4b in Item_cache_real::cache_value (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:10089
#6 0x0000000000fec91a in Item_cache::has_value (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:9650
#7 0x0000000000fedfbb in Item_cache_real::val_real (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:10098
#8 0x0000000000fff539 in Arg_comparator::compare_real (this=0x7f3a500065f8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.cc:1748
#9 0x0000000001014cc8 in Arg_comparator::compare (this=0x7f3a500065f8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.h:92
#10 0x00000000010017e7 in Item_func_eq::val_int (this=0x7f3a50006520) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.cc:2507
#11 0x0000000000fe6144 in Item::send (this=0x7f3a50006520, protocol=0x7f3a50001d10, buffer=0x7f3ad4d46e10) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:7563
#12 0x00000000015d4c48 in THD::send_result_set_row (this=0x7f3a50000b70, row_items=0x7f3a500058c8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_class.cc:4677
#13 0x00000000015ceed3 in Query_result_send::send_data (this=0x7f3a50006770, items=...) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_class.cc:2717
#14 0x00000000015e697a in JOIN::exec (this=0x7f3a500069f0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_executor.cc:158
#15 0x00000000016892ba in handle_query (thd=0x7f3a50000b70, lex=0x7f3a50002e78, result=0x7f3a50006770, added_options=0, removed_options=0)
at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_select.cc:184
#16 0x000000000163939e in execute_sqlcom_select (thd=0x7f3a50000b70, all_tables=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5156
#17 0x0000000001632405 in mysql_execute_command (thd=0x7f3a50000b70, first_level=true) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:2792
#18 0x000000000163a31c in mysql_parse (thd=0x7f3a50000b70, parser_state=0x7f3ad4d48550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5582
#19 0x000000000162f0a3 in dispatch_command (thd=0x7f3a50000b70, com_data=0x7f3ad4d48e00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
#20 0x000000000162df32 in do_command (thd=0x7f3a50000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
#21 0x0000000001770f97 in handle_connection (arg=0x5271810) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
#22 0x0000000001de0b41 in pfs_spawn_thread (arg=0x526e200) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
#23 0x00007f3ade33b6ba in start_thread (arg=0x7f3ad4d49700) at pthread_create.c:333
#24 0x00007f3add76d41d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
+ +

隐式转换规则

+

If one or both arguments are NULL, the result of the comparison is NULL, except for the NULL-safe <=> equality comparison operator. For NULL <=> NULL, the result is true. No conversion is needed.

+
+
+

If both arguments in a comparison operation are strings, they are compared as strings.

+
+
+

If both arguments are integers, they are compared as integers.

+
+
+

Hexadecimal values are treated as binary strings if not compared to a number.

+
+
+

If one of the arguments is a TIMESTAMP or DATETIME column and the other argument is a constant, the constant is converted to a timestamp before the comparison is performed. This is done to be more ODBC-friendly. This is not done for the arguments to IN(). To be safe, always use complete datetime, date, or time strings when doing comparisons. For example, to achieve best results when using BETWEEN with date or time values, use CAST() to explicitly convert the values to the desired data type.

+
+
+

A single-row subquery from a table or tables is not considered a constant. For example, if a subquery returns an integer to be compared to a DATETIME value, the comparison is done as two integers. The integer is not converted to a temporal value. To compare the operands as DATETIME values, use CAST() to explicitly convert the subquery value to DATETIME.

+
+
+

If one of the arguments is a decimal value, comparison depends on the other argument. The arguments are compared as decimal values if the other argument is a decimal or integer value, or as floating-point values if the other argument is a floating-point value.

+
+
+

In all other cases, the arguments are compared as floating-point (real) numbers. For example, a comparison of string and numeric operands takes places as a comparison of floating-point numbers.

+
+
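A small illustration of the last rule (string and numeric operands compared as floating-point numbers), runnable in a MySQL client:

```sql
SELECT '1' = 1, ' 1' = 1, '1a' = 1;  -- all three return 1 ('1a' converts to 1, with a warning)
SELECT '1.0' = 1;                    -- 1: compared as floating-point values, not as strings
```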

mysql 隐式转换可能不走索引

文档只描述了字符串转数字的情况

+

举例

+

Here is the example table; note that underlying_code is a varchar column:

+
show create table `base_underlying_information`
+CREATE TABLE `base_underlying_information` (
+  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT '主键ID',
+  `sec_id` varchar(10) NOT NULL COMMENT '标的ID',
+  `uni_code` varchar(30) NOT NULL COMMENT '标识,规则code-last-type',
+  `underlying_code` varchar(50) NOT NULL COMMENT '标的代码',
+  ... 省略一堆其他字段
+  PRIMARY KEY (`id`),
+  KEY `idx_underlying_code` (`underlying_code`)
+) ENGINE=InnoDB CHARSET=utf8
+
- With implicit conversion

When the condition uses the number 603023 instead of a string:

EXPLAIN SELECT * FROM `base_underlying_information` WHERE underlying_code = 603023

The EXPLAIN output shows that the index is not used, because the following rule applies:

> In all other cases, the arguments are compared as floating-point (real) numbers. For example, **a comparison of string and numeric operands takes place as a comparison of floating-point numbers.**

id select_type table partitions type possible_keys key key_len ref rows filtered Extra
1 SIMPLE base_underlying_information \N ALL idx_underlying_code \N \N \N 506079 10.00 Using where

An implicit conversion has happened.

The documentation describes it like this:

> For comparisons of a string column with a number, MySQL cannot use an index on the column to look up the value quickly. If str_col is an indexed string column, the index cannot be used when performing the lookup in the following statement:

SELECT * FROM tbl_name WHERE str_col=1;

> The reason for this is that there are many different strings that may convert to the value 1, such as '1', ' 1', or '1a'.

- Without implicit conversion

The column is varchar and the condition also uses a string literal, so no implicit conversion happens:

EXPLAIN SELECT * FROM `base_underlying_information` WHERE underlying_code = '603023'

id select_type table partitions type possible_keys key key_len ref rows filtered Extra
1 SIMPLE base_underlying_information \N ref idx_underlying_code idx_underlying_code 152 const 1 100.00 \N

I looked at the lexer and it does not seem to do the conversion there, so it must happen at runtime, but in which function?

Related reading:

- http://postgres.cn/docs/9.6/extend-type-system.html
- https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html
- https://blog.csdn.net/n88Lpo/article/details/101013055
- https://dev.mysql.com/doc/refman/5.7/en/type-conversion.html
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/08/namespace\344\270\216docker/index.html" "b/2019/12/08/namespace\344\270\216docker/index.html" new file mode 100644 index 0000000000..92a8216d7b --- /dev/null +++ "b/2019/12/08/namespace\344\270\216docker/index.html" @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + namespace与docker | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ namespace与docker +

+ + +
+ + + + +
+ + +
+

CLONE_NEWUTS (since Linux 2.6.19)
If CLONE_NEWUTS is set, then create the process in a new UTS
namespace, whose identifiers are initialized by duplicating
the identifiers from the UTS namespace of the calling process.
If this flag is not set, then (as with fork(2)) the process is
created in the same UTS namespace as the calling process.

+
+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/09/\345\215\217\347\250\213\345\210\207\346\215\242/index.html" "b/2019/12/09/\345\215\217\347\250\213\345\210\207\346\215\242/index.html" new file mode 100644 index 0000000000..6bd5a479d1 --- /dev/null +++ "b/2019/12/09/\345\215\217\347\250\213\345\210\207\346\215\242/index.html" @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 协程切换 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 协程切换 +

+ + +
+ + + + +
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
Breakpoint 2, 0x00007fffedb6c090 in swoole::Context::Context(unsigned long, void (*)(void*), void*)@plt ()
from /usr/local/phpfork/lib/php/extensions/debug-non-zts-20170718/swoole.so
(gdb) bt
#0 0x00007fffedb6c090 in swoole::Context::Context(unsigned long, void (*)(void*), void*)@plt ()
from /usr/local/phpfork/lib/php/extensions/debug-non-zts-20170718/swoole.so
#1 0x00007fffedc207b1 in swoole::Coroutine::Coroutine (this=0x17a8540, fn=0x7fffedc1cdb2 <swoole::PHPCoroutine::main_func(void*)>, private_data=0x7fffffffa0a0)
at /home/dinosaur/swoole-src/include/coroutine.h:204
#2 0x00007fffedc205ee in swoole::Coroutine::create (fn=0x7fffedc1cdb2 <swoole::PHPCoroutine::main_func(void*)>, args=0x7fffffffa0a0)
at /home/dinosaur/swoole-src/include/coroutine.h:121
#3 0x00007fffedc1d7d0 in swoole::PHPCoroutine::create (fci_cache=0x7fffffffa140, argc=0, argv=0x0) at /home/dinosaur/swoole-src/swoole_coroutine.cc:857
#4 0x00007fffedc1eebd in zif_swoole_coroutine_create (execute_data=0x7fffef61e090, return_value=0x7fffffffa1e0)
at /home/dinosaur/swoole-src/swoole_coroutine.cc:964
#5 0x0000000000aaf137 in ZEND_DO_FCALL_BY_NAME_SPEC_RETVAL_UNUSED_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:738
#6 0x0000000000b42992 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59743
#7 0x0000000000b47d9d in zend_execute (op_array=0x7fffef684b00, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
#8 0x0000000000a3afe0 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
#9 0x000000000098c749 in php_execute_script (primary_file=0x7fffffffc8c0) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
#10 0x0000000000b4b2a5 in do_cli (argc=2, argv=0x1561f20) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
#11 0x0000000000b4c491 in main (argc=2, argv=0x1561f20) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2019/12/11/php-cgi-windows-curl/index.html b/2019/12/11/php-cgi-windows-curl/index.html new file mode 100644 index 0000000000..9eeee8d630 --- /dev/null +++ b/2019/12/11/php-cgi-windows-curl/index.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + php-cgi-windows-curl | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2019/12/12/java-Class-forName/index.html b/2019/12/12/java-Class-forName/index.html new file mode 100644 index 0000000000..c0b598274d --- /dev/null +++ b/2019/12/12/java-Class-forName/index.html @@ -0,0 +1,446 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java Class_forName | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java Class_forName +

+ + +
+ + + + +
+ + +

在看到jdbc使用的例子里面,看到了Class.forName(“xxx”)的相关调用

+
1
Class.forName("com.mysql.jdbc.Driver")
+

What is this call for?
It is roughly the equivalent of PHP's class_exists(), or of a blank import in Go:
import _ "github.com/go-sql-driver/mysql"
The only purpose is to run the class's static initializer block so the driver registers itself.

+

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/12/js-vue\345\237\272\347\241\200/index.html" "b/2019/12/12/js-vue\345\237\272\347\241\200/index.html" new file mode 100644 index 0000000000..2179d5f888 --- /dev/null +++ "b/2019/12/12/js-vue\345\237\272\347\241\200/index.html" @@ -0,0 +1,453 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + js-vue基础 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ js-vue基础 +

+ + +
+ + + + +
+ + +

template标签只是为了方便

+

闭包作用域

+

this作用域

+

非prop属性作用

+

component标签

+

注册变量的等价方式

+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/12/mysql\344\270\245\346\240\274\346\250\241\345\274\217/index.html" "b/2019/12/12/mysql\344\270\245\346\240\274\346\250\241\345\274\217/index.html" new file mode 100644 index 0000000000..ceecb4ca84 --- /dev/null +++ "b/2019/12/12/mysql\344\270\245\346\240\274\346\250\241\345\274\217/index.html" @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql严格模式 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mysql严格模式 +

+ + +
+ + + + +
+ + +

获取sql_mode

+
1
SELECT @@sql_mode;
+ +
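Besides reading it, sql_mode can also be changed per session; a minimal sketch (the listed modes are just common examples, adjust them to your own needs):

```sql
SELECT @@SESSION.sql_mode;
SET SESSION sql_mode = 'STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION';
-- In strict mode, out-of-range or invalid values on INSERT/UPDATE become errors
-- instead of being silently truncated to the nearest legal value.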
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/12/php-pdo-\347\233\270\345\205\263\345\217\202\346\225\260/index.html" "b/2019/12/12/php-pdo-\347\233\270\345\205\263\345\217\202\346\225\260/index.html" new file mode 100644 index 0000000000..b0dd9ef35f --- /dev/null +++ "b/2019/12/12/php-pdo-\347\233\270\345\205\263\345\217\202\346\225\260/index.html" @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + php pdo 相关参数 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ php pdo 相关参数 +

+ + +
+ + + + +
+ + +

thinkphp5 的默认配置会开启ERRMODE_EXCEPTION

+
1
2
3
4
5
PDO::ATTR_CASE              => PDO::CASE_NATURAL,
PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
PDO::ATTR_ORACLE_NULLS => PDO::NULL_NATURAL,
PDO::ATTR_STRINGIFY_FETCHES => false,
PDO::ATTR_EMULATE_PREPARES => false,
+

pdo实现

pdo_raise_impl_error decides from the configured error mode whether to throw: with PDO::ERRMODE_EXCEPTION it throws a PDOException, otherwise it only raises an E_WARNING.

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
void pdo_raise_impl_error(pdo_dbh_t *dbh, pdo_stmt_t *stmt, const char *sqlstate, const char *supp) /* {{{ */
{
...
if (dbh && dbh->error_mode != PDO_ERRMODE_EXCEPTION) { // 没有设置 ERRMODE_EXCEPTION则抛warning
php_error_docref(NULL, E_WARNING, "%s", message);
} else {
zval ex, info;
zend_class_entry *def_ex = php_pdo_get_exception_base(1), *pdo_ex = php_pdo_get_exception();

object_init_ex(&ex, pdo_ex);

zend_update_property_string(def_ex, &ex, "message", sizeof("message")-1, message);
zend_update_property_string(def_ex, &ex, "code", sizeof("code")-1, *pdo_err);

array_init(&info);

add_next_index_string(&info, *pdo_err);
add_next_index_long(&info, 0);
zend_update_property(pdo_ex, &ex, "errorInfo", sizeof("errorInfo")-1, &info);
zval_ptr_dtor(&info);

zend_throw_exception_object(&ex); // // 否则抛出异常
}

if (message) {
efree(message);
}
}
+ + +

所以sql相关的错误只要try_catch还是能catch不少的

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2019/12/19/clickhouse-\347\274\226\350\257\221\345\256\211\350\243\205/index.html" "b/2019/12/19/clickhouse-\347\274\226\350\257\221\345\256\211\350\243\205/index.html" new file mode 100644 index 0000000000..e42c21dd80 --- /dev/null +++ "b/2019/12/19/clickhouse-\347\274\226\350\257\221\345\256\211\350\243\205/index.html" @@ -0,0 +1,480 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clickhouse 编译安装 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ clickhouse 编译安装 +

+ + +
+ + + + +
+ + +

编译流程

    +
  • github 拉代码

    +
    1
    git clone https://github.com/ClickHouse/ClickHouse.git
  • +
  • 创建子目录

    +
    1
    2
    3
cd ClickHouse/
mkdir build
cd build
cmake ..
    +
  • +
  • 编译需要升级到gcc-8 g++8:

    +
  • +
+
1
2
GCC version must be at least 8.  For example, if GCC 8 is available under
gcc-8, g++-8 names, do the following: export CC=gcc-8 CXX=g++-8;
+

我的操作系统是ubuntu所以

+
1
2
3
4
$ sudo  apt-get install gcc-8 g++-8
$ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 800 --slave /usr/bin/g++ g++ /usr/bin/g++-8
update-alternatives: using /usr/bin/gcc-8 to provide /usr/bin/gcc (gcc) in auto mode

+

然后看gcc版本升级到8.3.0

+
1
2
3
4
5
$ gcc -v
Using built-in specs.
...
gcc version 8.3.0 (Ubuntu 8.3.0-6ubuntu1~18.04.1)

+
    +
  • 删掉cmake相关缓存
    1
    $ rm -rf   CMakeCache.txt CMakeFiles
    +重新跑有这样的错误:
    1
    2
    3
    4
    Submodules are not initialized.  Run

    git submodule update --init --recursive

    +然后初始化git submodule:
    1
    $ git submodule update --init --recursive
    +因为我用的是v2ray + proxychains4,勉强把那些包下下来了
    然后继续跑
    1
    2
    3
    cmake -DCMAKE_BUILD_TYPE=Debug  -DCMAKE_INSTALL_PREFIX=/home/ubuntu/click  ..

    ninja all
    +然后我发现内存要比较大,而且硬盘要ssd,不然会编译特别慢
  • +
+

使用docker-compse启动

相关参考

+
    +
  • 第一步创建文件 config.xmlusers.xml
  • +
  • 第二步 新建docker-compose.yml
  • +
+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
version: '3'
services:
clickhouse-server:
image: yandex/clickhouse-server
container_name: clickhouse-server
hostname: clickhouse-server
ports:
- 8123:8123
expose:
- 9000
- 9009
volumes:
- ./config.xml:/etc/clickhouse-server/config.xml
- ./users.xml:/etc/clickhouse-server/users.xml
- ./data:/var/lib/clickhouse
- ./log/clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
- ./log/clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log
+
    +
  • 第二步
  • +
+
1
docker-compose up
+ +

使用clickhouse-client 连接

+
1
docker run -it --rm --link clickhouse-server:clickhouse-server --net clickhouse_default yandex/clickhouse-client --host clickhouse-server --user seluser --password 8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92
+ +
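Once the client is connected, a couple of quick sanity checks (standard ClickHouse SQL, nothing specific to this setup):

```sql
SELECT version();
SHOW DATABASES;
SELECT 1 + 1;
```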

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2019/12/23/mysql-error-sqlstate/index.html b/2019/12/23/mysql-error-sqlstate/index.html new file mode 100644 index 0000000000..2d65b3aad9 --- /dev/null +++ b/2019/12/23/mysql-error-sqlstate/index.html @@ -0,0 +1,458 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql_error_sqlstate | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mysql_error_sqlstate +

+ + +
+ + + + +
+ + +

When we use JDBC, PDO or any other MySQL driver, an error usually carries two error codes.
For example:

+
1
Error number: 1005; Symbol: ER_CANT_CREATE_TABLE; SQLSTATE: HY000
+

Here 1005 is the error number and HY000 is the SQLSTATE value.
Or, for example:

+
1
SQLSTATE[23000]: Integrity constraint violation: 1062 Duplicate entry '34' for key 'PRIMARY',
+

Here 23000 is the SQLSTATE value and 1062 is the MySQL error code.

+

那么两者的关系是怎么样的呢?

+
+
+

Error code: This value is numeric. It is MySQL-specific and is not portable to other database systems.

+
+
+
+

SQLSTATE value: This value is a five-character string (for example, ‘42S02’). SQLSTATE values are taken from ANSI SQL and ODBC and are more standardized than the numeric error codes.

+
+

Either way you will see two codes for one error: a SQLSTATE and an error code. The difference is that the SQLSTATE is the more standardized, portable one (taken from ANSI SQL and ODBC), while the error code is MySQL's own.
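The duplicate-entry case quoted above is easy to reproduce; 1062 is MySQL's own error code and 23000 the portable SQLSTATE (table name t is hypothetical):

```sql
CREATE TABLE t (id INT PRIMARY KEY);
INSERT INTO t VALUES (34);
INSERT INTO t VALUES (34);
-- ERROR 1062 (23000): Duplicate entry '34' for key 'PRIMARY'
--       ^ error code  ^ SQLSTATE
```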

+

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/01/07/php-\345\217\215\345\260\204/index.html" "b/2020/01/07/php-\345\217\215\345\260\204/index.html" new file mode 100644 index 0000000000..9e1e32e9eb --- /dev/null +++ "b/2020/01/07/php-\345\217\215\345\260\204/index.html" @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + php 反射 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ php 反射 +

+ + +
+ + + + +
+ + +

反射是一个很特别的api,php的反射是一个很特别的回调

+
1
2
3
4
5
6
7
8
9
10
(gdb) bt
#0 zim_reflection_class_hasProperty (execute_data=0x7ffff3a14220, return_value=0x7ffff3a14180) at /home/ubuntu/php-src-php-7.4.1/ext/reflection/php_reflection.c:4186
#1 0x0000555555af49b2 in ZEND_DO_FCALL_SPEC_RETVAL_USED_HANDLER () at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:1729
#2 0x0000555555b58295 in execute_ex (ex=0x7ffff3a14020) at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:53588
#3 0x0000555555b5c32d in zend_execute (op_array=0x7ffff3a61c00, return_value=0x0) at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:57664
#4 0x0000555555a80b27 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/ubuntu/php-src-php-7.4.1/Zend/zend.c:1663
#5 0x00005555559e2bad in php_execute_script (primary_file=0x7fffffffd0e0) at /home/ubuntu/php-src-php-7.4.1/main/main.c:2619
#6 0x0000555555b5ee34 in do_cli (argc=2, argv=0x55555678ab30) at /home/ubuntu/php-src-php-7.4.1/sapi/cli/php_cli.c:961
#7 0x0000555555b5ff9e in main (argc=2, argv=0x55555678ab30) at /home/ubuntu/php-src-php-7.4.1/sapi/cli/php_cli.c:1352

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/02/20/\347\274\226\350\257\221\345\216\237\347\220\206/index.html" "b/2020/02/20/\347\274\226\350\257\221\345\216\237\347\220\206/index.html" new file mode 100644 index 0000000000..3fe2be3d68 --- /dev/null +++ "b/2020/02/20/\347\274\226\350\257\221\345\216\237\347\220\206/index.html" @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 编译原理 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 编译原理 +

+ + +
+ + + + +
+ + +

什么是编译?

一个从一种状态集转换为另外一个状态集的过程

+

什么是优化?

什么是类型,类型就是集合的约束

一个类型就是一个集合

+

什么是隐式转换

It is when the compiler automatically maps a value from one set (type) into another set.

+

因为不同类型的运算是未定义的(也可能是不闭合的,但是更多是未定义的 )

+

举个例子sql的谓词有些是二值的有些是三值的,导致语义会很难每个人都清楚

+

同构

什么是同构? 这是我最想弄明白的东西,真的很奇妙

+

语法和语义(syntax and semantic)

自然语言的语法

如果学过英语,那么i eat apple就是一个主谓宾结构,我对自然语言的语法的理解就是满足某些结构的结构(好吧可能是错误的结论)

+

数理逻辑的语法

数理逻辑也有相类似的语法

+

编程语言的语法

编程语言也是特定的token组合就是一个语法结构;
举个例子:

+
1
a = 1 ;   // 由三个token组成  token<a> token<=> token <1> , 由parse规约而成
+ +

语义

操作语义(operate semantic)

描述这个语法对应的操作

+

表达式(Expressions)

如果是c++的表达式,就是一个序列,这个序列有返回值
举个例子:

+
1
The result of the expression always has type void [1]
+

返回值或者求值结果是void

+

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/03/04/induction/index.html b/2020/03/04/induction/index.html new file mode 100644 index 0000000000..3d4cecf146 --- /dev/null +++ b/2020/03/04/induction/index.html @@ -0,0 +1,561 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + induction | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ induction +

+ + +
+ + + + +
+ + +

前言

归纳法是一个很特别的推理方式。使用自然数的映射。(我的理解可能不太对)

+

Mathematical induction

数学归纳法

+
    +
  • P(0) is true
  • +
  • If P(m) is true then so is P(m + 1) for any natural number m.
    $(P(0) \land (\forall m \in \omega.\ P(m) \Rightarrow P(m+1))) \Rightarrow \forall n \in \omega.\ P(n)$
  • +
+

Well-ordering

Every non-empty subset of the natural numbers has a least element.

+

induction 举例

BNF is a typical inductively defined set. What is special about induction is that a very short expression can describe an infinite set.

+

Recursive definitions of sets

举例:
自然数集合:
P(0) = 0 ;
P(N+1) = P(N)+1

+
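A tiny worked example of mathematical induction as stated above (standard textbook material, added only for illustration):

```latex
% Claim P(n): \sum_{i=0}^{n} i = \frac{n(n+1)}{2} for every n \in \omega
% Base case:
P(0):\quad \sum_{i=0}^{0} i = 0 = \frac{0\cdot(0+1)}{2}
% Inductive step: assume P(m); then
\sum_{i=0}^{m+1} i = \frac{m(m+1)}{2} + (m+1) = \frac{(m+1)(m+2)}{2}
\quad\Rightarrow\quad P(m+1)
```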

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/03/10/math/index.html b/2020/03/10/math/index.html new file mode 100644 index 0000000000..5b2408fefc --- /dev/null +++ b/2020/03/10/math/index.html @@ -0,0 +1,449 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + math | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ math +

+ + +
+ + + + +
+ + +

其中一种是基于锁
我之前一直对acid理解有问题,锁和事务的关系,其实是这样的:

+

1 read(x) 和write(x)是不可以交换顺序的
2 write(x) write(x) 是不可以交换顺序的
3 write(x) 和read(x) 也是不可以交换顺序的

+

我们的事务 t1和t2 如果完全按照先执行t1再执行t2 就一点问题都没有,就是有点慢,并发低。

+

那么我们就用一些等价的方法,尽量减少阻塞。我们不锁住整个事务,只对冲突的部分进行锁定,其他就因为等价所以顺序没有关系,因为其他部分没有顺序关系,所以不用上锁,所以并发会上去

+
+

类型是什么?
类型描述了一个特别的集合

+

结构体是什么?

+

结构图本质是类型的组合,也就是关系

+

举个例子

+
1
2
3
4
struct{
int a,
int b
}
+

Essentially this is the product relation int × int, and a variable of this struct type is simply one element of that product.

+

递归是什么?
递归是差分方程,递归是不动点,但是递归的内容还得看

+

什么是可扩展性?

+

BNF 或者类似的规则系统为什么是正确的? 靠什么保证?是依赖范畴学或者其他数学的什么定理或者和数学的什么模型一致?
我一直很好奇规则系统的约束怎么做到的?因为规则系统真的很神奇

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/03/15/\350\247\204\345\210\231\347\263\273\347\273\237/index.html" "b/2020/03/15/\350\247\204\345\210\231\347\263\273\347\273\237/index.html" new file mode 100644 index 0000000000..11bb1ed1b5 --- /dev/null +++ "b/2020/03/15/\350\247\204\345\210\231\347\263\273\347\273\237/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 规则系统 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 规则系统 +

+ + +
+ + + + +
+ + +

我一直对所谓的可扩展性什么的很有疑惑,或者说我们要怎么设计一个规则系统,怎么知道这个规则的集合的边界在哪里

+

第一个例子: 流水线

+

流水线上每个节点都是一个回调,我们可以随意添加或者删除

+

有向无环图
等价于原始递归函数

+

这个规则系统的路径则需要输入来确定,所以和语言是等价的

+

所以一个规则系统等价于一个语言,所以我们可以使用一些内容来等价和变换

+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/03/16/ssa-optimistic/index.html b/2020/03/16/ssa-optimistic/index.html new file mode 100644 index 0000000000..978e5af619 --- /dev/null +++ b/2020/03/16/ssa-optimistic/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ssa optimistic | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ ssa optimistic +

+ + +
+ + + + +
+ + +

优化的本质是什么呢?

比如ssa,是减少死代码,通过常量传播和常量折叠减少运行时的计算

+

比如sql的逻辑优化: 就是一个逻辑下推 通过变换减少读io

+

编译的一般步骤:

lex : 词法分析
parse: 语法分析构造语法树
cfg优化
codegen

+

在golang 和php都有ssa 优化,ssa 优化是通过控制流图来做常量传递 常量折叠 和 死代码去除

+

php的ssa 优化在opcache中,而golang的也在类似的包里面

+

structure induction

+

CFG

construct cfg

ssa

what is ssa

+

A program is defined to be in SSA form if each variable is a target of exactly one assignment
statement in the program text.

+
+

如果程序里面每个变量只被赋值一次那么这个程序就具有ssa 形式

+

def-use chain and use-def chain

+

Under SSA form, each variable is defined once. Def-use chains are data structures
that provide, for the single definition of a variable, the set of all its uses.
In turn, a use-def chain, which under SSA consists of a single name, uniquely
specifies the definition that reaches the use.

+
+

def-use chain 就是输入是 定义(赋值) , 输出是使用被使用的变量的集合

+

use-def chain 刚好相反 输入是使用的变量 而 输出是他的定义(赋值)的集合, 对于ssa 的程序来说, 每个变量只被赋值(定义)一次,所以这个use-def这个数据结构在ssa形式下这个集合只有一个元素
ssa 形式下

+

ssa properties

ssa 有什么性质 ?

+

DG

JG

insert φ-function

construct ssa

destruct ssa

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/04/08/a-language-to-machine-code/index.html b/2020/04/08/a-language-to-machine-code/index.html new file mode 100644 index 0000000000..9f92efd099 --- /dev/null +++ b/2020/04/08/a-language-to-machine-code/index.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + a language to machine code | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ a language to machine code +

+ + +
+ + + + +
+ + +

计算机语言是什么?

+

我感觉是一个数学系统

+

编译成机器码是什么?

+

是绑定了动作

+

// lex parse 类型系统 ssa asm elf abi

+
1
2
3
keyword   :
int bool
for while if
+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/04/10/golang-stack/index.html b/2020/04/10/golang-stack/index.html new file mode 100644 index 0000000000..9dd4580d70 --- /dev/null +++ b/2020/04/10/golang-stack/index.html @@ -0,0 +1,446 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + golang compile | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ golang compile +

+ + +
+ + + + +
+ + +

golang 的 lex和parse 在src\cmd\compile\internal\gc\main.go开始

+

核心步骤

+
    +
  • parseFiles lex and parse
  • +
  • typecheck 语法树的遍历做类型检查
  • +
  • ssa
  • +
+

之后会经过link 连接和加载器ld

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
func Main(archInit func(*Arch)) {
lines := parseFiles(flag.Args()) // lex and parse
...
typecheckok = true

// Process top-level declarations in phases.

// Phase 1: const, type, and names and types of funcs.
// This will gather all the information about types
// and methods but doesn't depend on any of it.
//
// We also defer type alias declarations until phase 2
// to avoid cycles like #18640.
// TODO(gri) Remove this again once we have a fix for #25838.

// Don't use range--typecheck can add closures to xtop.
timings.Start("fe", "typecheck", "top1")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
xtop[i] = typecheck(n, ctxStmt)
}
}

// Phase 2: Variable assignments.
// To check interface assignments, depends on phase 1.

// Don't use range--typecheck can add closures to xtop.
timings.Start("fe", "typecheck", "top2")
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
xtop[i] = typecheck(n, ctxStmt)
}
}

// Phase 3: Type check function bodies.
// Don't use range--typecheck can add closures to xtop.
timings.Start("fe", "typecheck", "func")
var fcount int64
for i := 0; i < len(xtop); i++ {
n := xtop[i]
if op := n.Op; op == ODCLFUNC || op == OCLOSURE {
Curfn = n
decldepth = 1
saveerrors()
typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
checkreturn(Curfn)
if nerrors != 0 {
Curfn.Nbody.Set(nil) // type errors; do not compile
}
// Now that we've checked whether n terminates,
// we can eliminate some obviously dead code.
deadcode(Curfn)
fcount++
}
}
// With all types checked, it's now safe to verify map keys. One single
// check past phase 9 isn't sufficient, as we may exit with other errors
// before then, thus skipping map key errors.
}
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
(gdb) bt
#0 cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000147200, stk=0xc0001db698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:530
#1 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000147200, stk=0xc0001db698, bp=0xc0001b8a80, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
#2 0x0000000000799827 in cmd/go/internal/load.loadImport (pre=0x0, path=..., srcDir=..., parent=0xc000146d80, stk=0xc0001db698, importPos=..., mode=1, ~r7=<optimized out>)
at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:578
#3 0x000000000079890a in cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000146d80, stk=0xc0001db698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:531
#4 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000146d80, stk=0xc0001db698, bp=0xc0001b8700, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
#5 0x0000000000799827 in cmd/go/internal/load.loadImport (pre=0x0, path=..., srcDir=..., parent=0xc000146900, stk=0xc0001db698, importPos=..., mode=1, ~r7=<optimized out>)
at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:578
#6 0x000000000079890a in cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000146900, stk=0xc0001cf698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:531
#7 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000146900, stk=0xc0001db698, bp=0xc0001b8380, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
#8 0x00000000007a4c66 in cmd/go/internal/load.GoFilesPackage (gofiles=..., ~r1=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2230
#9 0x00000000007a3c54 in cmd/go/internal/load.PackagesAndErrors (patterns=..., ~r1=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2056
#10 0x00000000007a417d in cmd/go/internal/load.PackagesForBuild (args=..., ~r1=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2123
#11 0x0000000000842528 in cmd/go/internal/work.runBuild (cmd=<optimized out>, args=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/work/build.go:348
#12 0x0000000000932219 in main.main () at /home/dinosaur/newgo/go/src/cmd/go/main.go:189

+
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/04/15/coding/index.html b/2020/04/15/coding/index.html new file mode 100644 index 0000000000..f7b3eaa99a --- /dev/null +++ b/2020/04/15/coding/index.html @@ -0,0 +1,448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + coding | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ coding +

+ + +
+ + + + +
+ + +

我很蠢,什么都不会,代码也不会写

+

我只是喜欢一个我不存在的东西

+

我觉得写工作的代码很恶心

+

但是我喜欢修bug

+

最近最开心的是给php-src提了两个pr并且通过了 ,但是我还是不会写php,我也不会c,我也背不过php的array系列的函数.

+

说到底,我还是太弱了

+

我其实不会写代码,但是会修bug.

+

因为修bug是体力活,写代码是脑力活

+

我果然很笨,什么都不会 只会写一堆bug

+

准备看看能不能修mysql的代码

+

没有银弹 No Silver Bullet

+

我真的会写代码吗? 我其实什么都不懂吧

+

我真的什么都不懂,有点怀疑自己这几年是不是只会copy and paste 了

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/04/27/\345\255\227\347\254\246\344\270\262\345\210\260\344\273\243\347\240\201/index.html" "b/2020/04/27/\345\255\227\347\254\246\344\270\262\345\210\260\344\273\243\347\240\201/index.html" new file mode 100644 index 0000000000..9f950c8dd9 --- /dev/null +++ "b/2020/04/27/\345\255\227\347\254\246\344\270\262\345\210\260\344\273\243\347\240\201/index.html" @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 字符串和代码和编译 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 字符串和代码和编译 +

+ + +
+ + + + +
+ + +

代码是一堆字符串

+

代码会映射操作 然后这个就是一个编译的过程

+

所以编译就是一个映射的过程

+

很多编程语言其实都很像的,比如php和sql ,php的函数会放到一个全局的function_table 的hashmap里面,然后可以被调用.
key是函数名,value是op_array

+

而mysql的内部函数也很类似,注册到一个hashmap里面:key是函数名,value则是相应的指针.

+

而类的加载方面,java和php也差不多.java的加载class其实就是反序列化的过程,然后放到内存,而php的opcache从某种程度上也是那种效果了

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/05/06/\344\270\200\344\270\252sql\347\232\204\347\273\204\346\210\220/index.html" "b/2020/05/06/\344\270\200\344\270\252sql\347\232\204\347\273\204\346\210\220/index.html" new file mode 100644 index 0000000000..c60b9613ad --- /dev/null +++ "b/2020/05/06/\344\270\200\344\270\252sql\347\232\204\347\273\204\346\210\220/index.html" @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 一个sql的组成 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 一个sql的组成 +

+ + +
+ + + + +
+ + +

前言

写了很多mysql的sql,看了很多blog,兜兜转转,发现很多都是不太严谨的.
很多解释也是有点盲人摸象的感觉,不能说是错但是有些片面.

+

BNF和inductively defined set

1
select  name from  table where table.file =1;
+

三值

+
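Three-valued logic can be seen directly in a MySQL client; a predicate evaluates to TRUE, FALSE or NULL (unknown):

```sql
SELECT 1 = 1, 1 = 2, 1 = NULL, NULL = NULL;  -- 1, 0, NULL, NULL
SELECT NULL IS NULL, NULL <=> NULL;          -- 1, 1  (IS NULL and <=> are not three-valued)
```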
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/05/08/php-opcode-to-handler/index.html b/2020/05/08/php-opcode-to-handler/index.html new file mode 100644 index 0000000000..950b616999 --- /dev/null +++ b/2020/05/08/php-opcode-to-handler/index.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + php opcode to handler | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ php opcode to handler +

+ + +
+ + + + +
+ + +

php 的opcode 对应很多的handler

+

选哪个handler 是怎么选择的呢?

+

和tcp协议一个连接 是一个五元组一样

+

php的opcode 的handler 是一个三元组
分别是 opcode , op1 , op2

+

核心函数

+
1
2
3
4
5
6
7
8
ZEND_API void ZEND_FASTCALL zend_vm_set_opcode_handler(zend_op* op)
{
zend_uchar opcode = zend_user_opcodes[op->opcode];

...
// zend_opcode_handlers 是什么呢? 是一堆函数指针的数组 ,每个opcode + op1+ op2 决定一个 函数指针
op->handler = zend_opcode_handlers[zend_vm_get_opcode_handler_idx(zend_spec_handlers[opcode], op)];
}
+

所以核心的核心就是

+
1
zend_vm_get_opcode_handler_idx(zend_spec_handlers[opcode], op)
+

他做了什么呢?
其实就是算出zend_opcode_handlers这个函数指针数组的偏移值

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/05/11/raft/index.html b/2020/05/11/raft/index.html new file mode 100644 index 0000000000..20f3413215 --- /dev/null +++ b/2020/05/11/raft/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + raft | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ raft +

+ + +
+ + + + +
+ + +

性质

推导

状态机(state machine)

状态机是如何定义呢?

+

https://zh.wikipedia.org/zh-hans/%E6%9C%89%E9%99%90%E7%8A%B6%E6%80%81%E6%9C%BA

+

确认(ACK)

假设A要向B发送消息,我们假设
下面这个表达式:

+
1
(number: int , message : string)   => ( acknumber: int )
+

含义是这样的:

+
    +
  • 输入: 一个number + 这个number 对应的消息 ,
  • +
  • 返回: 如果B收到就把A的number原样发送回来
  • +
+

raft 选举

Raft's leader election looks very similar to basic Paxos; in that analogy, the basic Paxos value being chosen is the id of the node that becomes the leader for the term.

+

任期term如何生成

Correctness of the election (how do we guarantee that within one term there is at most one leader, or none at all?)

Premises:
1. An acceptor (voter) can vote at most once per term.
2. A candidate wins only by collecting votes from a majority.
To show:
Within a single term, at most one candidate can collect a majority.

+

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/05/18/\350\214\203\347\225\264\345\222\214\347\261\263\347\224\260\345\274\225\347\220\206/index.html" "b/2020/05/18/\350\214\203\347\225\264\345\222\214\347\261\263\347\224\260\345\274\225\347\220\206/index.html" new file mode 100644 index 0000000000..4e4fbd823d --- /dev/null +++ "b/2020/05/18/\350\214\203\347\225\264\345\222\214\347\261\263\347\224\260\345\274\225\347\220\206/index.html" @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 范畴和米田引理 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 范畴和米田引理 +

+ + +
+ + + + +
+ + +

https://segmentfault.com/a/1190000018331788

+

对于计算机来说,没有类型,没有函数,也没有oom,什么都没有

+

那么我们的程序是怎么来的呢?

+

映射和等价

+

https://bartoszmilewski.com/2015/10/28/yoneda-embedding/

+

我一直觉得sql的各种下推优化可以用米田引理或者范畴学来描述

+

最近在看一些范畴学的内容,我一直觉得我的逻辑很差,经常写bug,是不是我的逻辑太差呢?

+

我考虑的边界有问题?

+

是什么边界问题呢? 是我问题没有描述清楚还是什么呢?

+

还是需求提供者提的需求有问题呢?

+

范畴学真的很神奇

+

或者说,映射真的很神奇

+

如果我们要比较两个东西,那么我们先把他们映射成一个可比较的集合里面,然后他们就能比较了.

+

泛型是什么?

+

我一直想知道

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/05/27/tired/index.html b/2020/05/27/tired/index.html new file mode 100644 index 0000000000..a46c60a136 --- /dev/null +++ b/2020/05/27/tired/index.html @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + tired | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ tired +

+ + +
+ + + + +
+ + +

我是理解能力有问题吗?

+

我真的不懂那些需求文档写的是什么,文档写的是什么.

+

文档应该是傻逼都能通过文档get到的才叫文档,
难道我比傻逼还傻?

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/05/28/functor/index.html b/2020/05/28/functor/index.html new file mode 100644 index 0000000000..8333902404 --- /dev/null +++ b/2020/05/28/functor/index.html @@ -0,0 +1,433 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + functor | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ functor +

+ + +
+ + + + +
+ + + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/06/01/functor-1/index.html b/2020/06/01/functor-1/index.html new file mode 100644 index 0000000000..ef3db30be4 --- /dev/null +++ b/2020/06/01/functor-1/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + functor | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ functor +

+ + +
+ + + + +
+ + +

什么是范畴(category)?

+

A category consists of objects and of morphisms (arrows) between them, together with composition of morphisms and an identity morphism for every object.

+

什么是函子(functor)?

+

A functor is a mapping between two categories: it sends objects to objects and arrows to arrows.

+

In other words, a functor consists of the two categories plus a structure-preserving mapping between them.

+
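Stated precisely (the standard definition, added here for reference): a functor F between categories C and D maps objects and arrows and preserves identities and composition:

```latex
F : \mathcal{C} \to \mathcal{D}, \qquad
F(\mathrm{id}_A) = \mathrm{id}_{F(A)}, \qquad
F(g \circ f) = F(g) \circ F(f)
```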

hom函子是什么(hom functor)?
// todo

+

Set

1
2
3
4
Set is the category with
objects: all sets.
arrows: given sets X, Y , every (total) set-function f : X -> Y is an
arrow.
+ +

type and function

类型是范畴里面的对象
函数是其中的箭头, 这样是对的吗?
好像不太对,那就应该是错的,但是他们隐隐中有关系,但是不像是是范畴.

+

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/06/01/\344\276\235\350\265\226\345\222\214\345\206\262\347\252\201/index.html" "b/2020/06/01/\344\276\235\350\265\226\345\222\214\345\206\262\347\252\201/index.html" new file mode 100644 index 0000000000..5ad9139c25 --- /dev/null +++ "b/2020/06/01/\344\276\235\350\265\226\345\222\214\345\206\262\347\252\201/index.html" @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 依赖和冲突 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 依赖和冲突 +

+ + +
+ + + + +
+ + +

纸带

state(状态)

我看到有关状态的书籍是在可计算理论相关的书籍,或者类型系统相关的书籍.

+

展开

f(f(A,B),f(D,E))
f(A,B,D,E);
循环依赖
f(f)

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/06/11/\345\217\257\346\211\251\345\261\225\346\200\247/index.html" "b/2020/06/11/\345\217\257\346\211\251\345\261\225\346\200\247/index.html" new file mode 100644 index 0000000000..7dffdfd883 --- /dev/null +++ "b/2020/06/11/\345\217\257\346\211\251\345\261\225\346\200\247/index.html" @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 可扩展性 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 可扩展性 +

+ + +
+ + + + +
+ + +

什么是可扩展性?

+

我们先看有类型的情况

+
1
2
3
func(int x, int y){
return x+y
}
+

这个时候输入的是两个整数返回的是一个整数 x->y->z

+

可扩展性需要用类型系统或者形式化去描述

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/06/12/curry/index.html b/2020/06/12/curry/index.html new file mode 100644 index 0000000000..4da916e82b --- /dev/null +++ b/2020/06/12/curry/index.html @@ -0,0 +1,451 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + curry | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ curry +

+ + +
+ + + + +
+ + +

haskell 有个高阶函数curry

+

In GHCi we can use :t identifier to check the type of an identifier.

+

我们使用haskellcli看看

+
1
2
Prelude> :t curry
curry :: ((a, b) -> c) -> a -> b -> c
+ +

What does this mean?
Roughly: curry takes a function defined on a pair, ((a, b) -> c), and turns it into a -> b -> c, a function that accepts the two arguments one at a time. I still have not fully digested it.

+
- Lectures on the Curry-Howard Isomorphism [Sørensen & Urzyczyn 2006]
- Derivation and Computation [Simmons 2000]
- Proofs and Types [Girard, Lafont, Taylor 1996]

+ + + + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/06/12/js-define\345\207\275\346\225\260/index.html" "b/2020/06/12/js-define\345\207\275\346\225\260/index.html" new file mode 100644 index 0000000000..9835a2f3a0 --- /dev/null +++ "b/2020/06/12/js-define\345\207\275\346\225\260/index.html" @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + js define函数 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/06/18/\345\217\214\345\220\221\347\273\221\345\256\232/index.html" "b/2020/06/18/\345\217\214\345\220\221\347\273\221\345\256\232/index.html" new file mode 100644 index 0000000000..2e5d08ccae --- /dev/null +++ "b/2020/06/18/\345\217\214\345\220\221\347\273\221\345\256\232/index.html" @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 双向绑定 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 双向绑定 +

+ + +
+ + + + +
+ + +

双向绑定是什么?
这个问题我一直很疑惑,直到我了解了同构和双射

+

所以双向绑定的本质就是视图和数据同构?

+

从某种角度上来说,这也是一个米田引理的应用?

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/06/29/basic-paxos/index.html b/2020/06/29/basic-paxos/index.html new file mode 100644 index 0000000000..a0e1ac98c9 --- /dev/null +++ b/2020/06/29/basic-paxos/index.html @@ -0,0 +1,459 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + basic paxos | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ basic paxos +

+ + +
+ + + + +
+ + +

basic paxos

目的:
basic paxos 目的是为了让多个副本最多只有一个值.

+

paxos make simple

有一篇论文,描述了basic paxos 的证明和推导过程,描述了prepare 过程的原理

+

p1 每个acceptor必须有接收第一个它收到的proposal

+

p2 当一个proposal的value 被chosen , 那么所有后续proposal 的值等于value

+

p2a 当一个proposal的value 被chosen , 那么后续所有acceptor接收的值等于value

+

p2b 当一个proposal的value被chosen , 那么后续所有proposer issue 的proposal number 对应的值等于value

+

P2c: for any v and n, if a proposal with value v and number n is issued, then there is a set S consisting of a majority of acceptors such that either (a) no acceptor in S has accepted any proposal numbered less than n, or (b) v is the value of the highest-numbered proposal among all proposals numbered less than n accepted by the acceptors in S.

+

到p2c 的时候就是prepare的规则和条件了

+

每个guarantee的原因

P1: so that a value can be chosen even when only a single proposal is ever issued.
P2: so that if multiple proposals are chosen, they all have the same value (We can allow multiple proposals to be chosen, but we must guarantee that all chosen proposals have the same value).
P2a: to satisfy P2, we require that once a value is chosen, every proposal subsequently accepted by any acceptor has that chosen value.
P2b: to satisfy P2a, we require that once a value is chosen, every proposal subsequently issued by any proposer has that chosen value.
P2c:

+
    +
  • 大前提
    1
    2
    3
    assume that some proposal with number m and value
    v is chosen and show that any proposal issued with number n > m also
    has value v
  • +
  • 小前提
    1
    2
    assumption that every proposal issued with a number in m . . (n − 1) has
    value v , where i . . j denotes the set of numbers from i through j
  • +
+

相关阅读:

+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/07/07/c-auto-cast/index.html b/2020/07/07/c-auto-cast/index.html new file mode 100644 index 0000000000..f9980eb07d --- /dev/null +++ b/2020/07/07/c-auto-cast/index.html @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + c auto cast | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/07/10/group-concat\347\234\213mysql\345\207\275\346\225\260/index.html" "b/2020/07/10/group-concat\347\234\213mysql\345\207\275\346\225\260/index.html" new file mode 100644 index 0000000000..1e9f7baf30 --- /dev/null +++ "b/2020/07/10/group-concat\347\234\213mysql\345\207\275\346\225\260/index.html" @@ -0,0 +1,433 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + group_concat看mysql函数 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ group_concat看mysql函数 +

+ + +
+ + + + +
+ + + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/07/13/\345\273\266\350\277\237\346\261\202\345\200\274/index.html" "b/2020/07/13/\345\273\266\350\277\237\346\261\202\345\200\274/index.html" new file mode 100644 index 0000000000..6f1047973b --- /dev/null +++ "b/2020/07/13/\345\273\266\350\277\237\346\261\202\345\200\274/index.html" @@ -0,0 +1,433 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 延迟求值 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 延迟求值 +

+ + +
+ + + + +
+ + + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/07/21/sql-join/index.html b/2020/07/21/sql-join/index.html new file mode 100644 index 0000000000..73395e2bd9 --- /dev/null +++ b/2020/07/21/sql-join/index.html @@ -0,0 +1,462 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + sql join | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ sql join +

+ + +
+ + + + +
+ + +

什么是语言 ?

inductively defined sets

inductively defined sets 是由三部分组成

+
    +
  • 1 一个初始集合
  • +
  • 2 一个生成规则
  • +
  • 3 声明除了这个1 2两个条件之外没有其他的元素属于这个集合
  • +
+

例子

自然数集合

+
1
{0 , 1 , 2 ...}
+

这个集合
首先一个元素

+
1
{0}
+

Then the generation rule: if i is in the set, then its successor suc(i) is in the set too:

+
1
suc(i) 
+

left join 和right join的区别?

+

我是大学学通信工程的,有些数学概念还是不太全,只能偶尔补一下啦.
I kept looking for a formal definition of left join and right join, something like an RFC.
Just as C has C89/C99, SQL has SQL-99, and that document describes what LEFT join means:

+
1
2
3
4
5
6
7
8
9
10
11
12
Let XN1 and XN2 be effective distinct names for X1 and X2, respectively. Let TN be an effective
name for T.
Case:
a) If INNER or <cross join> is specified, then let S be the multiset of rows of T.
b) If LEFT is specified, then let S be the multiset of rows resulting from:
SELECT FROM T
UNION ALL
SELECT FROM X1
c) If RIGHT is specified, then let S be the multiset of rows resulting from:
SELECT FROM T
UNION ALL
SELECT FROM X2
+ +
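Reading the SQL-99 text above, a LEFT join is the inner join plus the unmatched left-hand rows padded with NULLs. A sketch with hypothetical tables t1(id, v) and t2(id, w):

```sql
CREATE TABLE t1 (id INT PRIMARY KEY, v VARCHAR(10));
CREATE TABLE t2 (id INT PRIMARY KEY, w VARCHAR(10));

-- The outer join ...
SELECT t1.id, t1.v, t2.id, t2.w
FROM t1 LEFT JOIN t2 ON t1.id = t2.id;

-- ... yields the same multiset of rows as:
SELECT t1.id, t1.v, t2.id, t2.w
FROM t1 JOIN t2 ON t1.id = t2.id
UNION ALL
SELECT t1.id, t1.v, NULL, NULL
FROM t1
WHERE NOT EXISTS (SELECT 1 FROM t2 WHERE t2.id = t1.id);
```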

相关阅读

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/08/11/\346\234\200\345\244\247\347\206\265/index.html" "b/2020/08/11/\346\234\200\345\244\247\347\206\265/index.html" new file mode 100644 index 0000000000..a422a170e4 --- /dev/null +++ "b/2020/08/11/\346\234\200\345\244\247\347\206\265/index.html" @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 最大熵 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/09/01/\346\210\221\347\232\204es\344\271\213\350\267\257/index.html" "b/2020/09/01/\346\210\221\347\232\204es\344\271\213\350\267\257/index.html" new file mode 100644 index 0000000000..c764598538 --- /dev/null +++ "b/2020/09/01/\346\210\221\347\232\204es\344\271\213\350\267\257/index.html" @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 我的es之旅 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 我的es之旅 +

+ + +
+ + + + +
+ + +

Tokenization

What is tokenization? Tokenization is a classification problem: whether to split at a given position is generally decided by weights. A machine cannot actually understand text, so the split is purely weight-based.

+

Tokenization happens at two steps: writing a doc, and running a query.

+

In Lucene the call stack generally looks like this; the final call is the incrementToken interface

+
incrementToken:147, StandardTokenizer (org.apache.lucene.analysis.standard)
incrementToken:37, LowerCaseFilter (org.apache.lucene.analysis)
incrementToken:51, FilteringTokenFilter (org.apache.lucene.analysis)
fillCache:91, CachingTokenFilter (org.apache.lucene.analysis)
incrementToken:70, CachingTokenFilter (org.apache.lucene.analysis)
createFieldQuery:318, QueryBuilder (org.apache.lucene.util)
createFieldQuery:257, QueryBuilder (org.apache.lucene.util)
newFieldQuery:468, QueryParserBase (org.apache.lucene.queryparser.classic)
getFieldQuery:457, QueryParserBase (org.apache.lucene.queryparser.classic)
handleBareTokenQuery:824, QueryParserBase (org.apache.lucene.queryparser.classic)
Term:494, QueryParser (org.apache.lucene.queryparser.classic)
Clause:366, QueryParser (org.apache.lucene.queryparser.classic)
Query:251, QueryParser (org.apache.lucene.queryparser.classic)
TopLevelQuery:223, QueryParser (org.apache.lucene.queryparser.classic)
parse:136, QueryParserBase (org.apache.lucene.queryparser.classic)
+ +
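For reference, a minimal sketch of driving that incrementToken loop yourself (assumes a Lucene dependency on the classpath; the class names are the same ones that appear in the stack above):

+
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenizeDemo {
    public static void main(String[] args) throws Exception {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream ts = analyzer.tokenStream("body", "hello elastic search")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();                     // required before the first incrementToken()
            while (ts.incrementToken()) {   // the same interface the stack trace above ends in
                System.out.println(term.toString());
            }
            ts.end();
        }
    }
}
+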

Search

Search works by combining the inverted index with weights, then taking the top few results by weight, so it too can be viewed as a weight-based classification problem.

+

High availability

// todo

+

Redundancy

// todo

+

Failover

// todo

+

lucene

The lucene source ships with simple examples; it breaks down into three main parts

+
1
2
3
4
5
6
7
- 1 indexing
- 1.1 tokenization
- 2 storage
- Lucene has many classes here, but abstracting them as "storage" seems fair; I have not read this part closely

- 3 search
- 3.1 scoring (usually tf-idf)
+

I have not looked at the es code carefully yet, but based on my understanding of compilers, es adds a parsing layer on top of all this and translates queries into the corresponding operations

+

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/09/18/crf/index.html b/2020/09/18/crf/index.html new file mode 100644 index 0000000000..4244519393 --- /dev/null +++ b/2020/09/18/crf/index.html @@ -0,0 +1,592 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + crf | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ crf +

+ + +
+ + + + +
+ + +

Me, who knows nothing at all about NLP

+

Gibbs distribution

+

Hammersley-Clifford theorem

+

Loss function

+

crf

+

How the parameters are computed

A CRF has a very large number of parameters; how do we estimate them?
By maximum likelihood estimation.

+

What is the essence of the maximum likelihood function?

+

The essence: given the observed data, we assume the data we collected is the most likely data to have occurred; the parameter values that maximize that probability are exactly the parameters we want.

+

Quasi-Newton methods

+

Labeling scheme

+

Functions

1
2
Y = { B , M , E , S}
X = {今,天,天,气,真, 热}
+ +

Linear-chain CRF parameter estimation

+

How to train

  • Labeling
    How do we label the data?
    For example, here are two sentences from the People's Daily corpus
  • +
+
1
2
3
4
5
// 句子1 
全总/j 致/v 全国/n 各族/r 职工/n 慰问信/n
// 句子2
勉励/v 广大/b 职工/n 发挥/v 工人阶级/n 主力军/n 作用/n ,/w 为/p 企业/n 改革/vn 发展/vn 建功立业/l

+ +

How do we label them?

+

Below is an example (B = begin, M = middle, E = end, S = single character)

+
1
2
3
4
// 句子1
全/B 总/E 致/S 全/B 国/E 各/B 族/E 职/B 工/E 慰/B 问/M 信/E
// 句子2
勉/B 励/E 广/B 大E 职/B 工/E 发/B 挥/E 工/B 人/M 阶/M 级/E 主/B 力/M 军/E 作/B 用/E ,/S 为/S 企/B 业/E 改/B 革/E 发/B 展/E 建/B 功/M 立/M 业/E
+ +

Parameter estimation

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/09/25/invariants/index.html b/2020/09/25/invariants/index.html new file mode 100644 index 0000000000..cff9cb92c4 --- /dev/null +++ b/2020/09/25/invariants/index.html @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + invariants | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/09/27/\345\276\252\347\216\257\344\270\215\345\217\230\345\274\217loop-invariants/index.html" "b/2020/09/27/\345\276\252\347\216\257\344\270\215\345\217\230\345\274\217loop-invariants/index.html" new file mode 100644 index 0000000000..bb8d49d579 --- /dev/null +++ "b/2020/09/27/\345\276\252\347\216\257\344\270\215\345\217\230\345\274\217loop-invariants/index.html" @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 循环不变式loop invariants | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 循环不变式loop invariants +

+ + +
+ + + + +
+ + +

Control-flow graph

Entry and exit

entry --->  test ---> exit
             |   ^
             |   |
             +---+   (loop body / back edge)
+

For the test there are two kinds of entry: the first entry, and later entries via the back edge.
For the test there are two kinds of exit: leaving the loop (exit), and continuing the loop.

+

1. The assertion holds on the first entry.
2. The assertion holds every time the test continues the loop.

+

From this we can conclude:
the exit necessarily satisfies the assertion.

+

- the test does not change the assertion
- every entry satisfies the assertion
therefore the exit satisfies the assertion
+ +

How to prove this formally

How do we prove it, or rather how do we abstract the proof, or find an isomorphic problem?

+

The core of a loop invariant is satisfying these constraints:
1. the initialization satisfies the assertion
2. after every iteration the assertion still holds
3. the loop terminates (it is not an infinite loop)

+

In fact, condition 3 is only there to rule out infinite loops; the core conditions are 1 and 2.

+ +
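A small made-up example of the three conditions, with the invariant written as comments (just a sketch):

+
public class LoopInvariantDemo {
    // Invariant: before every test, sum == a[0] + ... + a[i-1].
    static int sum(int[] a) {
        int sum = 0;
        int i = 0;                  // initialization: the empty prefix sums to 0, invariant holds
        while (i < a.length) {      // the test itself does not change the invariant
            sum += a[i];
            i++;                    // after each iteration the invariant holds again for the new i
        }
        // at the exit: invariant + (i == a.length)  =>  sum == a[0] + ... + a[a.length-1]
        return sum;
    }

    public static void main(String[] args) {
        System.out.println(sum(new int[]{1, 2, 3, 4}));  // 10
    }
}
+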
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/10/19/\346\204\237\347\237\245\346\234\272/index.html" "b/2020/10/19/\346\204\237\347\237\245\346\234\272/index.html" new file mode 100644 index 0000000000..63de50d8bf --- /dev/null +++ "b/2020/10/19/\346\204\237\347\237\245\346\234\272/index.html" @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 感知机 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 感知机 +

+ + +
+ + + + +
+ + +

A perceptron is a classifier whose output takes only two values, +1 and -1.

+

Multilayer perceptron:
input: the output of the previous layer
processing: the perceptron function
output: the value produced by the perceptron function

+ +
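A minimal sketch of a single perceptron as described above (the weights and bias are assumed to be already trained; training itself is not shown):

+
public class Perceptron {
    private final double[] w;  // weights, assumed already trained
    private final double b;    // bias

    Perceptron(double[] w, double b) { this.w = w; this.b = b; }

    // The output is always +1 or -1, as stated above.
    int predict(double[] x) {
        double s = b;
        for (int i = 0; i < w.length; i++) s += w[i] * x[i];
        return s >= 0 ? +1 : -1;
    }

    public static void main(String[] args) {
        // A hypothetical AND-like separator: fires only when both inputs are 1.
        Perceptron p = new Perceptron(new double[]{1.0, 1.0}, -1.5);
        System.out.println(p.predict(new double[]{1, 1}));  // +1
        System.out.println(p.predict(new double[]{1, 0}));  // -1
    }
}
+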
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/10/21/parser/index.html b/2020/10/21/parser/index.html new file mode 100644 index 0000000000..3f1a56f728 --- /dev/null +++ b/2020/10/21/parser/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + parser | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ parser +

+ + +
+ + + + +
+ + +

Maximum spanning tree

Initialization:
put in one node

+

Loop invariant:
the tree built so far is a subset of the full maximum spanning tree

+

Add the node reachable through the edge of maximum weight, then update the maximum weights.

+

Why does this work?

If the node we just added did not belong to the maximum spanning tree, the total weight would come out smaller, contradicting maximality.

+

mlp

Training

+
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/10/28/todolist/index.html b/2020/10/28/todolist/index.html new file mode 100644 index 0000000000..36c0b10b6e --- /dev/null +++ b/2020/10/28/todolist/index.html @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + todolist | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ todolist +

+ + +
+ + + + +
+ + +

1. compilers: SSA
2. tensorflow
3. how vue works
4. NLP parsers
5. es search

+
+

2020-11-17
I feel like nothing can get away from compilers and mathematical logic. Is that just my imagination?

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/11/12/mvcc/index.html b/2020/11/12/mvcc/index.html new file mode 100644 index 0000000000..eb4d662e33 --- /dev/null +++ b/2020/11/12/mvcc/index.html @@ -0,0 +1,567 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mvcc | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mvcc +

+ + +
+ + + + +
+ + +

The MVCC paper:
http://www.cs.cmu.edu/~pavlo/papers/p781-wu.pdf

+

Related content on Zhihu:
https://zhuanlan.zhihu.com/p/45734268

+

What is MVCC?

MVCC is multiversion concurrency control.

+

Mathematical background:

  • partial order: only some pairs of elements are comparable with each other
  • total order: every pair of elements is comparable
  • +
+

complete multiversion history

A complete MV history satisfies the following property:

+
    +
  • +
  • for each and all operations in , if , then
  • +
+

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/11/12/\346\225\260\347\220\206\351\200\273\350\276\221/index.html" "b/2020/11/12/\346\225\260\347\220\206\351\200\273\350\276\221/index.html" new file mode 100644 index 0000000000..cc45d9ce5d --- /dev/null +++ "b/2020/11/12/\346\225\260\347\220\206\351\200\273\350\276\221/index.html" @@ -0,0 +1,436 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 数理逻辑 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 数理逻辑 +

+ + +
+ + + + +
+ + +

I have never formally studied mathematical logic, but it feels a lot like the front end of a compiler.

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/11/18/btree/index.html b/2020/11/18/btree/index.html new file mode 100644 index 0000000000..62561bceef --- /dev/null +++ b/2020/11/18/btree/index.html @@ -0,0 +1,456 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + btree | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ btree +

+ + +
+ + + + +
+ + + +

tree

A free tree T is an undirected graph that is connected and acyclic.

+

A tree has three properties:

+
  • undirected
  • connected
  • acyclic
+

Definition

There are two parameters in total: k and h

+

Properties

1. Every leaf sits at the same height.
2.1 Apart from the leaves and the root, every node has at least k+1 children.
2.2 The root is either a leaf or has at least two children.
3. Every node has at most 2k+1 children.

+
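A sketch of the child-count bounds as code (a hypothetical node class, only to make properties 2.1, 2.2 and 3 concrete; property 1 needs a whole-tree check and is not shown):

+
import java.util.ArrayList;
import java.util.List;

class BTreeNode {
    List<Integer> keys = new ArrayList<>();
    List<BTreeNode> children = new ArrayList<>();  // empty for leaves
    boolean isRoot;

    boolean isLeaf() { return children.isEmpty(); }

    // Checks properties 2.1, 2.2 and 3 for the parameter k used above.
    boolean childCountOk(int k) {
        if (isLeaf()) return true;                 // leaves have no children
        int c = children.size();
        int min = isRoot ? 2 : k + 1;              // 2.2 for the root, 2.1 for the others
        return c >= min && c <= 2 * k + 1;         // 3: at most 2k+1 children
    }
}
+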

Core property

1
2
v ∈ Parent(p)   有
<a+1
+ + +

Insertion

There are a couple of ways to insert:
1. insert into the same page as the first node that is greater than it
2. insert into the same page as the smallest node that is greater than it

+

Deletion

Lookup

+
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/11/27/\347\272\246\346\235\237\345\222\214\347\273\223\346\236\204/index.html" "b/2020/11/27/\347\272\246\346\235\237\345\222\214\347\273\223\346\236\204/index.html" new file mode 100644 index 0000000000..717331e9c9 --- /dev/null +++ "b/2020/11/27/\347\272\246\346\235\237\345\222\214\347\273\223\346\236\204/index.html" @@ -0,0 +1,441 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 约束和结构 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 约束和结构 +

+ + +
+ + + + +
+ + +

Are structure and properties isomorphic?

+

Are constraints and structure isomorphic?

+

I have always felt that the reason business code is so hard to maintain is that our constraints are not isomorphic to the business itself. We pass values through one function after another; most of the time it works fine, but there is always some mismatch that, after enough propagation, eventually gets triggered.

+

Why does code become so hard to change once it grows large?
Because after all that propagation, too many values depend on each other. Take a function (a, b) -> (c): at first it depends only on a and b.

+

A few days later we change what a depends on, so a depends on d and e: (d, e) -> (a).

+

Keep iterating like this and nobody can untangle the dependencies anymore.

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/12/07/\346\263\233\345\236\213/index.html" "b/2020/12/07/\346\263\233\345\236\213/index.html" new file mode 100644 index 0000000000..3308c3e7c2 --- /dev/null +++ "b/2020/12/07/\346\263\233\345\236\213/index.html" @@ -0,0 +1,448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 泛型 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 泛型 +

+ + +
+ + + + +
+ + +

Consider the case without generics,
for example:

+
max(a : int , b:int){
xxx
}
+

The parameters are a and b, and the type constraint on both is int;
in other words the function's constraint is max(int, int).
The semantics of generics is:

+
1
2
3
max(a :<T> , b:<T>){

}
+

What is the constraint now?

+

The constraint becomes
max(<T>, <T>)

+
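In Java the same idea is written with a bounded type parameter; a minimal sketch:

+
public class MaxDemo {
    // The constraint is no longer max(int, int) but max(T, T),
    // with T itself constrained to be comparable.
    static <T extends Comparable<T>> T max(T a, T b) {
        return a.compareTo(b) >= 0 ? a : b;
    }

    public static void main(String[] args) {
        System.out.println(max(3, 7));          // works for Integer
        System.out.println(max("abc", "abd"));  // and for String
    }
}
+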

Effect:
our constraint has become more general; in compiler terms, or in terms of the solution space, there are more admissible mappings to choose from.

+

The role of generics is much like class inheritance, and inheritance is both an advantage and a disadvantage.

+

The generic constraint likewise has both advantages and drawbacks: the advantage is that it moves closer to being untyped, and the corresponding drawback is that people will abuse that untyped flexibility.

+

Just like inheritance: a lot of inheritance is unnecessary, and refactoring an inheritance hierarchy is very hard.

+

In the end, if we build a general-purpose wheel and it is used in many places, then every time the wheel changes we have to consider whether all the code that uses it stays compatible.

+

If the wheel is shared in fewer places, there is less compatibility work to do.

+

So any abstraction necessarily faces the cost of frequent change.

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/12/17/tcp/index.html b/2020/12/17/tcp/index.html new file mode 100644 index 0000000000..a022900f43 --- /dev/null +++ b/2020/12/17/tcp/index.html @@ -0,0 +1,439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + tcp与消息队列与paxos与顺序 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ tcp与消息队列与paxos与顺序 +

+ + +
+ + + + +
+ + +

How do we guarantee message reliability?

+

Premise:
every message is split into small chunks

+

How do we make sure no chunk is lost?

+

Map each chunk to an id. As long as we can verify that every id is present, the message is necessarily complete (nothing lost), because the ids are totally ordered and the content an id maps to is itself never lost.

+ +
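A sketch of that check: because the ids are totally ordered, "nothing lost" is just "no gaps up to the highest id" (a hypothetical receiver, not tied to any particular message queue):

+
import java.util.TreeSet;

public class GapDetector {
    private final TreeSet<Long> seen = new TreeSet<>();

    void onChunk(long id) {
        seen.add(id);   // receiving the same id twice is harmless: ids map 1:1 to chunks
    }

    // Complete iff ids 1..maxId are all present; the total order makes gaps detectable.
    boolean complete(long maxId) {
        return !seen.isEmpty() && seen.first() == 1L && seen.last() == maxId && seen.size() == maxId;
    }

    public static void main(String[] args) {
        GapDetector d = new GapDetector();
        d.onChunk(1); d.onChunk(3);
        System.out.println(d.complete(3));  // false: id 2 is missing, ask for a retransmission
        d.onChunk(2);
        System.out.println(d.complete(3));  // true
    }
}
+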
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2020/12/21/pushdown/index.html b/2020/12/21/pushdown/index.html new file mode 100644 index 0000000000..4801ed7967 --- /dev/null +++ b/2020/12/21/pushdown/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pushdown | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2020/12/25/\345\275\242\345\274\217\345\214\226\350\257\255\344\271\211/index.html" "b/2020/12/25/\345\275\242\345\274\217\345\214\226\350\257\255\344\271\211/index.html" new file mode 100644 index 0000000000..a644b056dc --- /dev/null +++ "b/2020/12/25/\345\275\242\345\274\217\345\214\226\350\257\255\344\271\211/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 形式化语义和类型系统 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 形式化语义和类型系统 +

+ + +
+ + + + +
+ + +

rule

judgment

Syntax

Syntax is the structure formed by a sequence of tokens.

+

Semantics

Semantics is what a given syntactic structure maps to.

+

For example, operational semantics: a syntactic form maps to the corresponding operations.

+

Denotational semantics: syntax maps to a mathematical mapping (a function).

+

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/01/06/pdf-format/index.html b/2021/01/06/pdf-format/index.html new file mode 100644 index 0000000000..01f4353929 --- /dev/null +++ b/2021/01/06/pdf-format/index.html @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + pdf format | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/01/06/three-value-prediate/index.html b/2021/01/06/three-value-prediate/index.html new file mode 100644 index 0000000000..e65fd66e48 --- /dev/null +++ b/2021/01/06/three-value-prediate/index.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + three value prediate and sql | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ three value prediate and sql +

+ + +
+ + + + +
+ + +

Three-valued logic

Three-valued predicates:
after evaluation a predicate yields one of three elements {T, F, U}, that is, true, false, and unknown.

+

Evaluation and predicates

SELECT 1=NULL
+

In MySQL this returns one row, and the value of that row is NULL.

+

① According to the standard, the WHERE, CASE, and HAVING clauses keep only the value true out of the three truth values.

+
SELECT 1 NOT IN (1,  NULL)    ## false, so the condition does not select the row
SELECT 1 NOT IN ( NULL)       ## NULL; by rule ① above it is not selected either
+

So when NOT IN is used in a WHERE clause and the NOT IN list contains NULL, nothing gets selected.

+

EXISTS: the standard defines EXISTS as a two-valued function, so it must map to true or false; in MySQL it is true as soon as the subquery returns one row.

+ + +
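A sketch of the three-valued truth tables behind rule ①, using Java's boxed Boolean with null standing in for unknown (an illustration only, not how MySQL implements it):

+
public class ThreeValuedLogic {
    // null plays the role of UNKNOWN.
    static Boolean and(Boolean a, Boolean b) {
        if (Boolean.FALSE.equals(a) || Boolean.FALSE.equals(b)) return false; // false wins
        if (a == null || b == null) return null;                              // otherwise unknown spreads
        return true;
    }

    static Boolean not(Boolean a) {
        return a == null ? null : !a;
    }

    public static void main(String[] args) {
        // 1 NOT IN (1, NULL)  ==  NOT(1=1 OR 1=NULL)  ==  NOT(true)     == false
        // 1 NOT IN (NULL)     ==  NOT(1=NULL)         ==  NOT(unknown)  == unknown
        System.out.println(not(true));             // false -> filtered out by WHERE
        System.out.println(not((Boolean) null));   // null  -> also filtered out, only TRUE passes
        System.out.println(and(true, null));       // null  -> unknown
    }
}
+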
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/01/13/RSA/index.html b/2021/01/13/RSA/index.html new file mode 100644 index 0000000000..3c27e8e43f --- /dev/null +++ b/2021/01/13/RSA/index.html @@ -0,0 +1,563 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + RSA | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ RSA +

+ + +
+ + + + +
+ + +

Preliminary definitions

The set of prime numbers

+

Proof of RSA

Let M be an arbitrary integer; it needs to satisfy

what we want to prove is

where

+

Euler's totient function

It is defined as the number of positive integers less than or equal to n that are coprime to n.
In particular, if x is prime, the totient of x evaluates to x-1.

+

Below we prove the right-hand part of the formula, namely:

+
  • the case where M is divisible by n, that is, when

    holds
  • +
+
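The formulas above were rendered as images in the original page; the standard statement being proved is the following (the usual RSA notation, which I am assuming is what was meant here):

+
n = pq, \qquad ed \equiv 1 \pmod{\varphi(n)}
\text{claim:}\quad M^{ed} \equiv M \pmod{n} \quad \text{for every integer } M
\text{Euler's theorem:}\quad \gcd(M, n) = 1 \;\Rightarrow\; M^{\varphi(n)} \equiv 1 \pmod{n}
+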

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/01/28/mysql\347\232\204select/index.html" "b/2021/01/28/mysql\347\232\204select/index.html" new file mode 100644 index 0000000000..1c6ca5f864 --- /dev/null +++ "b/2021/01/28/mysql\347\232\204select/index.html" @@ -0,0 +1,451 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql的select | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mysql的select +

+ + +
+ + + + +
+ + +

Why I wanted to read the SELECT code

I ran into a table with only a hundred-odd thousand rows yet tens of GB on disk. Why? Because one column is a LONGTEXT
holding lots of very long text, and it got to the point where even select * from table limit 1000 could no longer be read back.

+

A simple SELECT statement

select id from test where id < 100 ;
+ +

What actually happens along the way?

+

Step one is much like PHP: the compiler front end, lex and parse.
Step two is logical and physical optimization; think of it as the constant folding or dataflow analysis of an ordinary programming language, that is, compile-time optimization.
Step three is the semantic actions, the real execution, which you can think of as the runtime:
the predicate is not visible to the read itself; it is applied on the index, the matching rows come back, columns are then projected, and joins and sorting happen afterwards. For this SQL the only real work is reading rows through the index; memory versus disk is invisible at this level.

+

First the table must be read, and the table comes from an .ibd file, so in the end a system call is needed to read the file; on Linux that system call is pread.

+

Where is the index stored?
In the tablespace.
Where is the row content stored?
Also in the tablespace.
Where does the association happen?
When the index entries are read out of the tablespace.
Is an entire row associated at once?
Yes.
How is a secondary index read?
The secondary index leads to the primary (clustered) index, and the primary index leads to the row content.

+

So there is effectively a semantic layering:
sql -> semantic actions applied to the index -> the index accesses the tablespace (much like swap space, or the relation between physical and virtual memory: pages are loaded from disk only when needed)

+

The related stack traces:

(gdb) bt
#0 srv_start (create_new_db=create_new_db@entry=false) at /home/ubuntu/mysql-8.0.23/storage/innobase/srv/srv0start.cc:1857
#1 0x00005555577275b6 in innobase_init_files (tablespaces=0x7fffea1f1380, dict_init_mode=DICT_INIT_CHECK_FILES) at /home/ubuntu/mysql-8.0.23/storage/innobase/handler/ha_innodb.cc:5042
#2 innobase_ddse_dict_init (dict_init_mode=DICT_INIT_CHECK_FILES, version=<optimized out>, tables=0x7fffea1f1360, tablespaces=0x7fffea1f1380)
at /home/ubuntu/mysql-8.0.23/storage/innobase/handler/ha_innodb.cc:12323
#3 0x00005555573d2aef in dd::bootstrap::DDSE_dict_init (thd=thd@entry=0x55555b899410, dict_init_mode=dict_init_mode@entry=DICT_INIT_CHECK_FILES, version=80023)
at /home/ubuntu/mysql-8.0.23/sql/dd/impl/bootstrap/bootstrapper.cc:737
#4 0x00005555575f92e4 in dd::upgrade_57::do_pre_checks_and_initialize_dd (thd=0x55555b899410) at /home/ubuntu/mysql-8.0.23/sql/dd/upgrade_57/upgrade.cc:911
#5 0x0000555556697ec5 in bootstrap::handle_bootstrap (arg=arg@entry=0x7fffffffda10) at /home/ubuntu/mysql-8.0.23/sql/bootstrap.cc:323
#6 0x0000555557b934a1 in pfs_spawn_thread (arg=0x55555b834c80) at /home/ubuntu/mysql-8.0.23/storage/perfschema/pfs.cc:2900
#7 0x00007ffff7bbb6db in start_thread (arg=0x7fffea1f2700) at pthread_create.c:463
#8 0x00007ffff61b571f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
+ +
(gdb) info threads 
Id Target Id Frame
1 Thread 0x7ffff7fe7880 (LWP 17988) "mysqld" 0x00007ffff7bbcd2d in __GI___pthread_timedjoin_ex (threadid=140737121298176, thread_return=thread_return@entry=0x0, abstime=abstime@entry=0x0,
block=block@entry=true) at pthread_join_common.c:89
* 2 Thread 0x7fffea1f2700 (LWP 18000) "mysqld" __libc_pread64 (fd=4, buf=0x7fffe81d0000, count=65536, offset=0) at ../sysdeps/unix/sysv/linux/pread64.c:29
4 Thread 0x7fffe8f49700 (LWP 18269) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42eda10)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
5 Thread 0x7fffe3b32700 (LWP 18270) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edab0)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
6 Thread 0x7fffe3331700 (LWP 18271) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edb50)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
7 Thread 0x7fffe2b30700 (LWP 18272) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edbf0)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
8 Thread 0x7fffe232f700 (LWP 18273) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edc90)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
9 Thread 0x7fffe1b2e700 (LWP 18274) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edd30)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
10 Thread 0x7fffe132d700 (LWP 18275) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42eddd0)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
11 Thread 0x7fffe0b2c700 (LWP 18276) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42ede70)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
12 Thread 0x7fffd3d5f700 (LWP 18277) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edf10)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
13 Thread 0x7fffd355e700 (LWP 18278) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edfb0)
at ../sysdeps/unix/sysv/linux/futex-internal.h:88
14 Thread 0x7fffd2d5d700 (LWP 18279) "mysqld" 0x00007ffff6ace280 in operator new(unsigned long) () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6

+ +

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/02/18/mvcc-translate/index.html b/2021/02/18/mvcc-translate/index.html new file mode 100644 index 0000000000..64c753daa1 --- /dev/null +++ b/2021/02/18/mvcc-translate/index.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mvcc translate | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mvcc translate +

+ + +
+ + + + +
+ + +

5.1 INTRODUCTION
对于多版本并发控制算法来说,对于每个数据实例x写操作都会生成一个X的新的副本(或者叫做版本).
数据管理器因此会保存一个包含数据管理器赋值过给X的所有版本的列表.
对于每个读操作Read(x),调度器不仅把读操作发送给数据管理器,还会告诉数据管理器他想要读x的哪一个版本.
In a multiversion concurrency control algorithm, each Write on a data item x
produces a new copy (or version) of X. The DM that manages x therefore
keeps a list of versions of X, which is the history of values that the DM has
assigned to X. For each Read(x), the scheduler not only decides when to send
the Read to the DM, but it also tells the DM which one of the versions of x to
read.
多版本并发控制的优点在于帮助调度器避免拒绝太晚的操作(也就是说晚一点的操作不会被拒绝).举个例子,(单版本的情况下)调度器会拒绝读他本该读但是被覆盖的数据.
在多版本的情况下,旧的值不会被覆盖,因此可以延迟去读.调度器可以通过读旧版本的值来避免(单版本下)对于读操作的拒绝.
保持多版本并对于并发控制并不会花费太多成本,因为对于故障恢复算法来说也是需要版本的信息.
The benefit of multiple versions for concurrency control is to help the
scheduler avoid rejecting operations that arrive too late. For example, the
scheduler normally rejects a Read because the value it was supposed to read
has already been overwritten. With multiversions, such old values are never
overwritten and are therefore always available to tardy Reads. The scheduler
can avoid rejecting the Read simply by having the Read read an old version.’
Maintaining multiple versions may not add much to the cost of concurrency
control, because the versions may be needed anyway by the recovery
algorithm.
As we’ll see in the next chapter, many recovery algorithms have to
maintain some before image information, at least of those data items that have
been updated by active transactions; the recovery algorithm needs those before
images in case any of the active transactions abort. The before images of a data
item are exactly its list of old versions. It is a small step for the DM to make
those versions explicitly available to the scheduler.
一个很明显的花费是保存多个版本需要很多存储空间,为了控制这些存储的现在,版本内容必须要定期周期性地清理或者归档.
因为某些特定的版本还会被活跃的事务(也就是没有提交或者没有回滚回滚的事务),清理版本信息需要同时兼顾活跃的事务.
清理动作也是mvcc的另外一个花费.
An obvious cost of maintaining multiple versions is storage space. To
control this storage requirement, versions must periodically be purged or archived. Since certain versions may be needed by active transactions, purging
versions must be synchronized with respect to active transactions. This purging
activity is another cost of multiversion concurrency control.

+

我们假设当事务被抛弃,那些那么这个事务创建的版本也会被销毁.在我们后续的讨论中,词版本描述的是已提交事务或者活跃事务的数据对应的值.因此,当调度器决定分配x的特定版本给操作Read(x),返回的值不会包含被抛弃的事务.
如果版本读产生于活跃的事务,可恢复性(也就是回滚)要求读操作对应的事务的提交必须晚于被读的活跃事务的提交.

+

We assume that if a transaction is aborted, any versions it created are
destroyed. In our subsequent discussion, the term “version” will refer to the
value of a data item produced by a transaction that’s either active or committed.
Thus, when the scheduler decides to assign a particular version of x to
Read(x), the value returned is not one produced by an aborted transaction. If
the version read is one produced by an active transaction, recoverability
requires that the reader’s commitment be delayed until the transaction that
produced the version has committed.
如果被读的事务最后被抛弃了(这个事务对应的版本也无效了),该活跃的事务也需要因此而被抛弃.
If that transaction actually aborts
(thereby invalidating its version), the reader must also be aborted.
当前存在的多版本内容仅仅对调度器和数据管理器可见,对于用户使用事务是不可见的.
The existence of multiple versions is only visible to the scheduler and DM,
not to user transactions.
事务只会持有该数据比如x和y.除了数据库处理系统,用户本身看上去只有一个版本,即,在用户的角度看是最后一个会写入
Transactions still reference data items, such as x and
y. Users therefore expect the DBS to behave as if there were only one version of
each data item, namely, the last one that was written from that user’s perspective.
调度器会通过使用多版本来减少拒绝的操作,从而提升性能.
The scheduler may use multiple versions to improve performance by
rejecting operations less frequently.
不过最后的结果会和单版本的结果看上去一样.
But it must not change the system’s
functionality over a single version view of the database.
There are many applications of databases in which users do want to
explicitly access each of the multiple versions of a data item. For example, a
user may wish to maintain several versions of a design database: the last design
released for manufacturing, the last design checked for correctness, and the
most recent working design. The user may update each version of the design
independently. Since the existence of these multiple versions is not transparent
to the user, such applications are not appropriate for the multiversion concurrency
control algorithms described in this chapter.

+

Analyzing Correctness
分析mvcc的正确性
为了分析mvcc算法的正确性,我们需要扩展可序列化理论.我们需要扩展两类历史:运行在多版本数据库的多版本历史,在用户看来是单版本的单版本历史.用户会把序列化但版本历史(因为我们把事务看成只有一个版本的,我们所有的目标就是多版本执行的内容和但版本看到的一样,相类似的例子就是编译器经过ssr优化后很多顺序都变了但是看上去都一样)
To analyze the correctness of multiversion concurrency control algorithms, we
need to extend serializability theory. This extension requires two types of
histories: multiversion (MV) histories that represent the DM’s execution of
operations on a multiversion database, and single version (IV) histories that
represent the interpretation of MV histories in the users’ single version view of
the database. Serial 1V histories are the histories that the user regards as
correct.
不过实际上系统是多版本的(只是看上去和单版本的一样).所以为了证明这个并发控制算法是正确的,我们必须证明多版本的历史的约束和单版本的是等价的.那么多版本历史单版本历史等价是什么意思?(也就是多版本历史和单版本历史等价的动作语义是什么)
But the system actually produces MV histories. So, to prove that a
concurrency control aIgorithm is correct, we must prove that each of the MV
histories that it can produce is equivalent to a serial 1V history,
What does it mean for an MV history to be equivalent to a 1V history?
我们通过拓展单版本历史之间的等价来描述多版本历史单版本历史等价.为了做这个扩展,我们需要引入一些符号.
Let’s try to answer this by extending the definition of equivalence of 1V histories
that we used in Chapters 2-4. To attempt this extension, we need a little
notation.
对于每个数据实例x,我们用xi,xj…来表示x的版本,下标是写这个版本的事务的编号(也就是xi就是表示事务i写了一个版本x),
因此对于多版本历史,永远都是这个下标Wi[Xi],版本的下标和和事务的下标一样.多版本历史的读操作则没有那么特殊,举个例子ri[xj].
For each data item X, we denote the versions of x by xi, xj, . . . , where
the subscript is the index of the transaction that wrote the version. Thus, each
Write in an MV history is always of the form Wi[Xi], where the version
subscript equals the transaction subscript. Reads are denoted in the usual way,
such as ri[xj].
假如我们说多版本历史单版本历史等价的(equivalence)的定义是:如果多版本的每个操作的重复和单版本的冲突都一样.
考虑多版本历史

+
1
H1 = w0[x0]c0w1[x1]c1r2[x0]w2[y2]c2
+

H1这个历史里面,只有w0[x0]r2[x0]是冲突的,写操作w1[x1]w0[x0]以及r2[x0]不冲突,因为他们操作的是不同版本的数据,即x1.现在我们来考虑单版本历史:

+
1
H2 = w0[x]c0w1[x]c1r2[x]w2[y]c2 
+

我们通过去掉操作的版本的下标(也就是去掉版本号)来构造历史H2,比如x1映射成x,x0也映射成x,y2映射成y.这种情况下(H2的单版本历史),w0[x]r2[x0]是冲突的.按照定义(如果他们冲突一样)则他们等价,但是这其实是不合理的(虽然他们都冲突).在历史H2,T2T1x.但是在历史H2,T1读的是T0(也就是说冲突是一样但是读的数据来源是不一样的).因为这两个历史(H1和H2)读的来源不一致,所以他们最后写的操作也不一致
Suppose we adopt a definition of equivalence that says an MV history
HM” is equivalent to a 1V history HIV if every pair of conflicting operations in
Hbp, is in the same order in HIV. Consider the MV history
H, = wobol co WEA cl rz[xol w,[yzl cz.
The only two operations in H, that conflict are w,[x,] and r,[x,]. The operation
w,[x,] does not conflict with either w,[x,] or r,[x,], because it operates on a
different
We constructed H, by mapping each operation on versions x0, x,, and yz in H,
into the same operation on the corresponding data items x and y. Notice that
the two operations in H, that conflict, w,[x,] and r,[x,], are in the same order
in H, as in H,. So, according to the definition of equivalence just given, H, is
equivalent to H,. But this is not reasonable. In H,, T, reads x from T,, whereas
in H,, T, reads x from T,,.’ Since T2 reads a different value of x in H, and H,, it
may write a different value in y.
我们的对于(单版本和多版本之间)的冲突之所以有一点问题,是因为多版本历史单版本历史操作的是不同的对象:
一个是对版本的操作,一个是对数据的操作(可以类比一个是”打11岁的你”和”打你”是不同语义的).他们的操作是有不同的冲突属性.
举个例子,多版本情况下w1[x1]r2[x0]不冲突,(相对应的版本历史.怎么对应?当然是把下标去掉)单版本情况下w1[x]r2[x]是冲突的.
因此,如果仅仅通过冲突来定义他们是等价的,这是不精确的.
This definition of equivalence based on conflicts runs into trouble because
MVand 1V histories have slightly different operations - version operations
versus data item operations. These operations have different conflict properties.
For example, w,[x,] does not conflict with yz[xo], but their corresponding
1V operations w,[x] and TJX] do conflict. Therefore, a definition of equivalence
based on conflicts is inappropriate.
因此,为了解决这个问题(读的来源不一样的问题),我们需要回到2.6节的视图等价.
回想一下,如果两个历史的读的源都一样而且最后写也一样则他们视图等价.
比较历史H1H2.在H1(多版本历史)中,T2从T1读x,但是对于H2(单版本历史),T2从T0读x,因此H1和H2视图不等价
To solve this problem, we need to return to first principles by adopting the
more fundamental definition of view equivalence developed in Section 2.6.
Recall that two histories are view equivalent if they have the same reads-from
relationships and the same final writes. Comparing histories H, and H,, we see
that T, reads x from T, in H,, but T, reads x from T, in H,. Thus, H, is not
view equivalent to H2.
然后我们获得满足条件等价(单版本和多版本之间的等价,定义是视图等价,那么两者是等价的)的定义.
我们需要通过一个对单版本历史与多版本历史之间等价的方式.
其中一个方式是SG(H)是无环的,所以历史H是等价于序列化多版本历史,但是仅仅是这样是没有太大帮助.因为(无环)序列化历史不是等价于序列化单版本历史.
举个例子:
H3 = w0[x0]w0[y0]c0R// todo
如果我们把版本的内容作为单独的数据实例,就会构造一下的序列化图,虽然这个序列化图是无环的,H3还是和单版本的不等价,因为他们映射的读来源不一样

+

Now that we have a satisfactory definition of equivalence, we need a way
of showing that every MV history H produced by a given multiversion concurrency
control algorithm is equivalent to a serial 1V history. One way would be
to show that SG( H) is acyclic, so H is equivalent to a serial MV history, Unfortunately,
this doesn’t help much, because not every serial MV history is equivalent
to a serial 1V history. For example, consider the serial MV history
仅仅是多版本历史的子集,也就是l-serial MV histories才会等价于序列化单版本历史,如果所有的read-from关系,要么读自己的事务,要么读最新的事务,这样就说l-serial MV histories,
这样的serial MV histories是可以于单版本的历史等价
Only a subset of serial MV histories, called l-serial MV histories, are
equivalent to serial 1V histories. Intuitively, a serial MV history is I-serial if
for each reads-from relationship, say T, reads x from T,, T, is the last trdnsaction
preceding T, that writes any version of x. Notice that Ii, is not l-serial
because TL reads x from T,), not T,, which is the last transaction preceding T2
that writes x.

+

mv2pl > 1sr > 5.3 Let H be an MV history over ?: C(H) is equivalent to a
serial, 1V history over T iff H is 1SR.

+
+
+

My own summary of the MVCC principle

Definition of 1SR

An MV history is one-copy serializable (or 1SR) if its committed projection is equivalent to a 1-serial MV history.

+

1-serial MV history:

every transaction reads from the latest version written among the transactions that precede it
1. For MV transactions Ti and Tj: if Ti reads x from Tj, then the constraint is that Tj's version of x is the newest version of x written by any transaction preceding Ti

+
  • A small question: "one transaction precedes another" is a partial order; there is a question here of how exactly this "precedes" is defined
+

MV History Equivalence:

two MV histories are view equivalent if, across the transactions of both histories, every read reads from the same source

+

mv2pl imply mvsg
mvsg imply 1sr
1sr imply 1-serial

+

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/05/\345\246\202\344\275\225\345\206\231\344\270\200\344\270\252\346\255\243\347\241\256\347\232\204\344\273\243\347\240\201/index.html" "b/2021/03/05/\345\246\202\344\275\225\345\206\231\344\270\200\344\270\252\346\255\243\347\241\256\347\232\204\344\273\243\347\240\201/index.html" new file mode 100644 index 0000000000..7163049b10 --- /dev/null +++ "b/2021/03/05/\345\246\202\344\275\225\345\206\231\344\270\200\344\270\252\346\255\243\347\241\256\347\232\204\344\273\243\347\240\201/index.html" @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 如何写一个正确的代码 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 如何写一个正确的代码 +

+ + +
+ + + + +
+ + +

How do we write a correct piece of code?

+

The answer is formal verification; one such technique is Hoare logic.

+

An example:

+
Fast exponentiation
precondition: x > 0
x/2/2/... must eventually reach 1 or 0, so the recursion terminates after finitely many steps

fast exponentiation:
a^x = (a^(floor(x/2)))^2 * a^(x mod 2);
fun(a, x) = {
    if (x == 0) {
        return 1;
    } else if (x == 1) {
        return a;
    }
    return fun(a, floor(x/2))^2 * a^(x mod 2);
}
+
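A runnable version of the sketch above, with the pre/postconditions written as comments (Java; assumes a non-negative exponent and ignores overflow):

+
public class FastPow {
    // Precondition:  x >= 0
    // Postcondition: returns a^x
    // Termination:   x is halved on every recursive call, so it reaches 0 or 1 after finitely many steps.
    static long pow(long a, long x) {
        if (x == 0) return 1;
        if (x == 1) return a;
        long half = pow(a, x / 2);        // a^(floor(x/2))
        long result = half * half;        // (a^(floor(x/2)))^2
        if (x % 2 == 1) result *= a;      // * a^(x mod 2)
        return result;
    }

    public static void main(String[] args) {
        System.out.println(pow(2, 10));  // 1024
        System.out.println(pow(3, 5));   // 243
    }
}
+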
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/03/10/docker-compose-spec/index.html b/2021/03/10/docker-compose-spec/index.html new file mode 100644 index 0000000000..336145622e --- /dev/null +++ b/2021/03/10/docker-compose-spec/index.html @@ -0,0 +1,458 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + docker-compose spec | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ docker-compose spec +

+ + +
+ + + + +
+ + +

The docker-compose spec

+
1
2
3
4
5
6
7
TOP :
version
service
network
volumes
config
secrets
+ +

service :
build
deploy

+
+
+

Each service MAY also include a Build section, which defines how to create the Docker image for the service. Compose implementations MAY support building docker images using this service definition. If not implemented the Build section SHOULD be ignored and the Compose file MUST still be considered valid.

+
+
+
+

Build support is an OPTIONAL aspect of the Compose specification, and is described in detail here

+
+
+

Each Service defines runtime constraints and requirements to run its containers. The deploy section groups these constraints and allows the platform to adjust the deployment strategy to best match containers’ needs with available resources.

+
+
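A minimal compose file touching the top-level keys listed above (illustrative only; the image name and all values are placeholders, and configs/secrets are omitted):

+
version: "3.9"

services:
  web:
    image: nginx:alpine          # or a build: section instead of an image
    ports:
      - "8080:80"
    networks:
      - backend
    volumes:
      - data:/usr/share/nginx/html
    deploy:                      # the runtime constraints described above
      replicas: 2
      resources:
        limits:
          memory: 128M

networks:
  backend: {}

volumes:
  data: {}
+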

Related documents

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/03/11/mysqlbinlog/index.html b/2021/03/11/mysqlbinlog/index.html new file mode 100644 index 0000000000..fef3217514 --- /dev/null +++ b/2021/03/11/mysqlbinlog/index.html @@ -0,0 +1,448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysqlbinlog | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ mysqlbinlog +

+ + +
+ + + + +
+ + +

How do you read MySQL's binlog?

You can use the mysqlbinlog tool that ships with MySQL.

+

The important part is to add -v.
The command:

+
mysqlbinlog   -v  mysql-bin.000006
+

Then you can see the actual SQL; in my case it was:

+
#210311 14:51:10 server id 1  end_log_pos 1606 CRC32 0x783f56ab 	Query	thread_id=64	exec_time=0	error_code=0
SET TIMESTAMP=1615445470/*!*/;
BEGIN
/*!*/;
# at 1606
#210311 14:51:10 server id 1 end_log_pos 1672 CRC32 0x52632df5 Table_map: `user_db`.`tb_user` mapped to number 119
# at 1672
#210311 14:51:10 server id 1 end_log_pos 1783 CRC32 0xf565f574 Update_rows: table id 119 flags: STMT_END_F

BINLOG '
3r1JYBMBAAAAQgAAAIgGAAAAAHcAAAAAAAEAB3VzZXJfZGIAB3RiX3VzZXIABgMPDw8BDwgUAEAA
FAAyADj1LWNS
3r1JYB8BAAAAbwAAAPcGAAAAAHcAAAAAAAEAAgAG///AYAAAAAI5NgYxMjM0NTYFOTk5OTcSD3po
YW5nc2FuQGJ1Zy5jbsBgAAAAATUGMTIzNDU2BTk5OTk3Eg96aGFuZ3NhbkBidWcuY2509WX1
'/*!*/;
### UPDATE `user_db`.`tb_user`
### WHERE
### @1=96
### @2='96'
### @3='123456'
### @4='99997'
### @5=18
### @6='zhangsan@bug.cn'
### SET
### @1=96
### @2='5'
### @3='123456'
### @4='99997'
### @5=18
### @6='zhangsan@bug.cn'
# at 1783
#210311 14:51:10 server id 1 end_log_pos 1814 CRC32 0xa90279ec Xid = 1794
COMMIT/*!*/;
SET @@SESSION.GTID_NEXT= 'AUTOMATIC' /* added by mysqlbinlog */ /*!*/;
DELIMITER ;
# End of log file
/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
/*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;

+ +

Protocol flow

COM_BINLOG_DUMP -> com_binlog_dump

+
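Programmatically, this COM_BINLOG_DUMP flow is what binlog client libraries implement; a sketch using the open-source mysql-binlog-connector-java client (host, port and credentials here are placeholders):

+
import com.github.shyiko.mysql.binlog.BinaryLogClient;

public class BinlogTail {
    public static void main(String[] args) throws Exception {
        // Registers as a replica and issues COM_BINLOG_DUMP under the hood.
        BinaryLogClient client = new BinaryLogClient("127.0.0.1", 3306, "repl_user", "repl_pass");
        client.registerEventListener(event -> System.out.println(event));
        client.connect();   // blocks and streams events (TABLE_MAP, UPDATE_ROWS, XID, ...)
    }
}
+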

Related reading

https://www.cnblogs.com/netsa/p/7350629.html
https://dev.mysql.com/doc/internals/en/replication-protocol.html

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/15/c-\346\240\207\345\207\206\345\272\223\347\232\204vector/index.html" "b/2021/03/15/c-\346\240\207\345\207\206\345\272\223\347\232\204vector/index.html" new file mode 100644 index 0000000000..09e5929ad2 --- /dev/null +++ "b/2021/03/15/c-\346\240\207\345\207\206\345\272\223\347\232\204vector/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + c++ 标准库的vector | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/15/\345\277\203\350\267\263\345\222\214tcp/index.html" "b/2021/03/15/\345\277\203\350\267\263\345\222\214tcp/index.html" new file mode 100644 index 0000000000..1402ee5927 --- /dev/null +++ "b/2021/03/15/\345\277\203\350\267\263\345\222\214tcp/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 心跳和tcp | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/17/\350\256\272\346\226\207\347\232\204\346\234\211\350\266\243\346\200\247/index.html" "b/2021/03/17/\350\256\272\346\226\207\347\232\204\346\234\211\350\266\243\346\200\247/index.html" new file mode 100644 index 0000000000..30d299361a --- /dev/null +++ "b/2021/03/17/\350\256\272\346\226\207\347\232\204\346\234\211\350\266\243\346\200\247/index.html" @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 论文的有趣性 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 论文的有趣性 +

+ + +
+ + + + +
+ + +

Background

Papers are great fun; I really hope I can read many, many more of them.

+

Sometimes I feel the mathematics in papers is genuinely beautiful.

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/22/\344\270\200\346\254\241tcp\351\224\231\350\257\257\346\216\222\346\237\245/index.html" "b/2021/03/22/\344\270\200\346\254\241tcp\351\224\231\350\257\257\346\216\222\346\237\245/index.html" new file mode 100644 index 0000000000..e357e7937e --- /dev/null +++ "b/2021/03/22/\344\270\200\346\254\241tcp\351\224\231\350\257\257\346\216\222\346\237\245/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 一次tcp错误排查 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ 一次tcp错误排查 +

+ + +
+ + + + +
+ + +

Some code using PHP's Workerman threw this exception:

+
1
fwrite(): send of 157 bytes failed with errno=11 Resource temporarily unavailable
+

The socket was set to non-blocking, and yet this error appeared. It turned out the issue had already been fixed upstream so that no notice is raised anymore, but my PHP 5.6 still carries the old code and therefore still raises it. I spent a whole day on this; it was not a bug on my side.
The latest upstream change:

+
1
https://github.com/php/php-src/pull/5026/files
+
	didwrite = send(sock->socket, buf, XP_SOCK_BUF_SIZE(count), (sock->is_blocked && ptimeout) ? MSG_DONTWAIT : 0);

if (didwrite <= 0) {
char *estr;
int err = php_socket_errno();
if (err == EWOULDBLOCK || err == EAGAIN) {
if (sock->is_blocked) {
int retval;

sock->timeout_event = 0;

do {
retval = php_pollfd_for(sock->socket, POLLOUT, ptimeout);

if (retval == 0) {
sock->timeout_event = 1;
break;
}

if (retval > 0) {
/* writable now; retry */
goto retry;
}

err = php_socket_errno();
} while (err == EINTR);
} else {
/* EWOULDBLOCK/EAGAIN is not an error for a non-blocking stream.
* Report zero byte write instead. */
return 0;
}
}

estr = php_socket_strerror(err, NULL, 0);
php_error_docref(NULL, E_NOTICE, "Send of " ZEND_LONG_FMT " bytes failed with errno=%d %s",
(zend_long)count, err, estr);
efree(estr);
}

if (didwrite > 0) {
php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), didwrite, 0);
}

return didwrite;
}
+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/03/27/build-grpc/index.html b/2021/03/27/build-grpc/index.html new file mode 100644 index 0000000000..f52cb2c8db --- /dev/null +++ b/2021/03/27/build-grpc/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + build grpc | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/27/docker\346\214\201\344\271\205\345\214\226/index.html" "b/2021/03/27/docker\346\214\201\344\271\205\345\214\226/index.html" new file mode 100644 index 0000000000..0245eb4b32 --- /dev/null +++ "b/2021/03/27/docker\346\214\201\344\271\205\345\214\226/index.html" @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + docker持久化 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/27/mysql-\344\270\273\344\273\216\345\244\215\345\210\266/index.html" "b/2021/03/27/mysql-\344\270\273\344\273\216\345\244\215\345\210\266/index.html" new file mode 100644 index 0000000000..3e7a6b1790 --- /dev/null +++ "b/2021/03/27/mysql-\344\270\273\344\273\216\345\244\215\345\210\266/index.html" @@ -0,0 +1,439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql 主从复制 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2021/03/30/ipc/index.html b/2021/03/30/ipc/index.html new file mode 100644 index 0000000000..b52935873c --- /dev/null +++ b/2021/03/30/ipc/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ipc | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ ipc +

+ + +
+ + + + +
+ + +

Pipes

+
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/03/31/\350\267\263\350\241\250/index.html" "b/2021/03/31/\350\267\263\350\241\250/index.html" new file mode 100644 index 0000000000..fe1cb30fff --- /dev/null +++ "b/2021/03/31/\350\267\263\350\241\250/index.html" @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 跳表 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/04/01/tcp\345\215\217\350\256\256/index.html" "b/2021/04/01/tcp\345\215\217\350\256\256/index.html" new file mode 100644 index 0000000000..024ed5c593 --- /dev/null +++ "b/2021/04/01/tcp\345\215\217\350\256\256/index.html" @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + tcp协议 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ tcp协议 +

+ + +
+ + + + +
+ + +

State machine

                             +---------+ ---------\      active OPEN
| CLOSED | \ -----------
+---------+<---------\ \ create TCB
| ^ \ \ snd SYN
passive OPEN | | CLOSE \ \
------------ | | ---------- \ \
create TCB | | delete TCB \ \
V | \ \
+---------+ CLOSE | \
| LISTEN | ---------- | |
+---------+ delete TCB | |
rcv SYN | | SEND | |
----------- | | ------- | V
+---------+ snd SYN,ACK / \ snd SYN +---------+
| |<----------------- ------------------>| |
| SYN | rcv SYN | SYN |
| RCVD |<-----------------------------------------------| SENT |
| | snd ACK | |
| |------------------ -------------------| |
+---------+ rcv ACK of SYN \ / rcv SYN,ACK +---------+
| -------------- | | -----------
| x | | snd ACK
| V V
| CLOSE +---------+
| ------- | ESTAB |
| snd FIN +---------+
| CLOSE | | rcv FIN
V ------- | | -------
+---------+ snd FIN / \ snd ACK +---------+
| FIN |<----------------- ------------------>| CLOSE |
| WAIT-1 |------------------ | WAIT |
+---------+ rcv FIN \ +---------+
| rcv ACK of FIN ------- | CLOSE |
| -------------- snd ACK | ------- |
V x V snd FIN V
+---------+ +---------+ +---------+
|FINWAIT-2| | CLOSING | | LAST-ACK|
+---------+ +---------+ +---------+
| rcv ACK of FIN | rcv ACK of FIN |
| rcv FIN -------------- | Timeout=2MSL -------------- |
| ------- x V ------------ x V
\ snd ACK +---------+delete TCB +---------+
------------------------>|TIME WAIT|------------------>| CLOSED |
+---------+ +---------+
+ + + +

Congestion control

1. slow start and congestion avoidance
2. fast retransmit

+

Slow start

What is slow start for?

+

Fast retransmit

Fast recovery

My understanding

What is TCP in essence?
In essence it is a byte stream.
Why are there endianness problems?
Because a byte stream is just bytes; if you only ever take one byte at a time there is no problem at all between machines of different endianness.
How is packet loss avoided?
Sequence numbers plus retransmission. Why do sequence numbers work?
Because sequence numbers are in one-to-one correspondence with the segments, so the numbering and the payload are isomorphic.
Is retransmission a problem?
No: since sequence numbers and segments correspond one to one, retransmission is idempotent, so it causes no problem.

+

Why do we need a state machine?
Because a state machine moves from one well-defined state to the next, which makes the whole flow much clearer.

+

Related reading

+ + +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/04/02/canal\351\234\200\350\246\201\346\263\250\346\204\217\347\232\204\347\202\271/index.html" "b/2021/04/02/canal\351\234\200\350\246\201\346\263\250\346\204\217\347\232\204\347\202\271/index.html" new file mode 100644 index 0000000000..e214ec791d --- /dev/null +++ "b/2021/04/02/canal\351\234\200\350\246\201\346\263\250\346\204\217\347\232\204\347\202\271/index.html" @@ -0,0 +1,458 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + canal需要注意的点 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ canal需要注意的点 +

+ + +
+ + + + +
+ + +

The painful parts:

1. The sync position is persisted every second to a file or to zk. Binlogs are usually only kept for a few days, so if the binlog file recorded in the file/zk metadata no longer exists on the MySQL side, canal reports an error.
The error message looks roughly like this:

+
java.io.IOException: Received error packet: errno = 1236, sqlstate = HY000 errmsg = Could not find first log file name in binary log index file
+

At that point the only options are to adjust the configuration or delete the meta.dat file and restart canal; it will then take the latest position from MySQL (show master status).

+

2. Another really nasty point about restarting canal is that it reads the information_schema database to fetch table names, table ids and so on, and this often takes a long time; I am not sure whether it was just our test environment, but it took quite a while.

+

How the meta is flushed

The meta information is flushed every second, according to the configuration,
to the file or to zk, so a restart can lead to duplicate consumption.

+

Finding the position

Loading order:
1. get the position from the meta: getLatestIndexBy, i.e. read the position from the memory/zk/file meta store
2. otherwise read it according to the configuration:

+
protected EntryPosition findStartPositionInternal(ErosaConnection connection) {
MysqlConnection mysqlConnection = (MysqlConnection) connection;
LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
if (logPosition == null) {// 找不到历史成功记录
EntryPosition entryPosition = null;
if (masterInfo != null && mysqlConnection.getConnector().getAddress().equals(masterInfo.getAddress())) {
entryPosition = masterPosition;
} else if (standbyInfo != null
&& mysqlConnection.getConnector().getAddress().equals(standbyInfo.getAddress())) {
entryPosition = standbyPosition;
}

if (entryPosition == null) {
entryPosition = findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
}

// 判断一下是否需要按时间订阅
if (StringUtils.isEmpty(entryPosition.getJournalName())) {
// 如果没有指定binlogName,尝试按照timestamp进行查找
if (entryPosition.getTimestamp() != null && entryPosition.getTimestamp() > 0L) {
logger.warn("prepare to find start position {}:{}:{}",
new Object[] { "", "", entryPosition.getTimestamp() });
return findByStartTimeStamp(mysqlConnection, entryPosition.getTimestamp());
} else {
logger.warn("prepare to find start position just show master status");
return findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
}
} else {
if (entryPosition.getPosition() != null && entryPosition.getPosition() > 0L) {
// 如果指定binlogName + offest,直接返回
entryPosition = findPositionWithMasterIdAndTimestamp(mysqlConnection, entryPosition);
logger.warn("prepare to find start position {}:{}:{}",
new Object[] { entryPosition.getJournalName(), entryPosition.getPosition(),
entryPosition.getTimestamp() });
return entryPosition;
} else {
EntryPosition specificLogFilePosition = null;
if (entryPosition.getTimestamp() != null && entryPosition.getTimestamp() > 0L) {
// 如果指定binlogName +
// timestamp,但没有指定对应的offest,尝试根据时间找一下offest
EntryPosition endPosition = findEndPosition(mysqlConnection);
if (endPosition != null) {
logger.warn("prepare to find start position {}:{}:{}",
new Object[] { entryPosition.getJournalName(), "", entryPosition.getTimestamp() });
specificLogFilePosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
entryPosition.getTimestamp(),
endPosition,
entryPosition.getJournalName(),
true);
}
}

if (specificLogFilePosition == null) {
// position不存在,从文件头开始
entryPosition.setPosition(BINLOG_START_OFFEST);
return entryPosition;
} else {
return specificLogFilePosition;
}
}
}
} else {
if (logPosition.getIdentity().getSourceAddress().equals(mysqlConnection.getConnector().getAddress())) {
if (dumpErrorCountThreshold >= 0 && dumpErrorCount > dumpErrorCountThreshold) {
// binlog定位位点失败,可能有两个原因:
// 1. binlog位点被删除
// 2.vip模式的mysql,发生了主备切换,判断一下serverId是否变化,针对这种模式可以发起一次基于时间戳查找合适的binlog位点
boolean case2 = (standbyInfo == null || standbyInfo.getAddress() == null)
&& logPosition.getPostion().getServerId() != null
&& !logPosition.getPostion().getServerId().equals(findServerId(mysqlConnection));
if (case2) {
EntryPosition findPosition = fallbackFindByStartTimestamp(logPosition, mysqlConnection);
dumpErrorCount = 0;
return findPosition;
}
// 处理 binlog 位点被删除的情况,提供自动重置到当前位点的功能
// 应用场景: 测试环境不稳定,位点经常被删。强烈不建议在正式环境中开启此控制参数,因为binlog 丢失调到最新位点也即意味着数据丢失
if (isAutoResetLatestPosMode()) {
dumpErrorCount = 0;
return findEndPosition(mysqlConnection);
}
Long timestamp = logPosition.getPostion().getTimestamp();
if (isRdsOssMode() && (timestamp != null && timestamp > 0)) {
// 如果binlog位点不存在,并且属于timestamp不为空,可以返回null走到oss binlog处理
return null;
}
} else if (StringUtils.isBlank(logPosition.getPostion().getJournalName())
&& logPosition.getPostion().getPosition() <= 0
&& logPosition.getPostion().getTimestamp() > 0) {
return fallbackFindByStartTimestamp(logPosition,mysqlConnection);
}
// 其余情况
logger.warn("prepare to find start position just last position\n {}",
JsonUtils.marshalToString(logPosition));
return logPosition.getPostion();
} else {
// 针对切换的情况,考虑回退时间
long newStartTimestamp = logPosition.getPostion().getTimestamp() - fallbackIntervalInSeconds * 1000;
logger.warn("prepare to find start position by switch {}:{}:{}", new Object[] { "", "",
logPosition.getPostion().getTimestamp() });
return findByStartTimeStamp(mysqlConnection, newStartTimestamp);
}
}
+ + +

Event types

There are many event types; for now I only care about UPDATE and INSERT.

+
public enum EventType
implements com.google.protobuf.ProtocolMessageEnum {
/**
* <code>INSERT = 1;</code>
*/
INSERT(0, 1),
/**
* <code>UPDATE = 2;</code>
*/
UPDATE(1, 2),
/**
* <code>DELETE = 3;</code>
*/
DELETE(2, 3),
/**
* <code>CREATE = 4;</code>
*/
CREATE(3, 4),
/**
* <code>ALTER = 5;</code>
*/
ALTER(4, 5),
/**
* <code>ERASE = 6;</code>
*/
ERASE(5, 6),
/**
* <code>QUERY = 7;</code>
*/
QUERY(6, 7),
/**
* <code>TRUNCATE = 8;</code>
*/
TRUNCATE(7, 8),
/**
* <code>RENAME = 9;</code>
*/
RENAME(8, 9),
/**
* <code>CINDEX = 10;</code>
*
* <pre>
**CREATE INDEX*
* </pre>
*/
CINDEX(9, 10),
/**
* <code>DINDEX = 11;</code>
*/
DINDEX(10, 11),
/**
* <code>GTID = 12;</code>
*/
GTID(11, 12),
/**
* <code>XACOMMIT = 13;</code>
*
* <pre>
** XA *
* </pre>
*/
XACOMMIT(12, 13),
/**
* <code>XAROLLBACK = 14;</code>
*/
XAROLLBACK(13, 14),
/**
* <code>MHEARTBEAT = 15;</code>
*
* <pre>
** MASTER HEARTBEAT *
* </pre>
*/
MHEARTBEAT(14, 15),
;
+ +
    +
  • Related reading
  • +
+

http://www.tianshouzhi.com/api/tutorials/canal

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2021/04/13/20210413\345\217\215\346\200\235\350\277\207\345\216\273/index.html" "b/2021/04/13/20210413\345\217\215\346\200\235\350\277\207\345\216\273/index.html" new file mode 100644 index 0000000000..c108476914 --- /dev/null +++ "b/2021/04/13/20210413\345\217\215\346\200\235\350\277\207\345\216\273/index.html" @@ -0,0 +1,454 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 20210413反思过去 | dinosaur + + + + + + + + + + + + +

20210413 Reflecting on the past

Suddenly I don't want to write this business code any more, just like that. I have been working for almost four years with nothing to show for it; I feel like I can't do anything.

What have I actually done in these years?

I fixed two memory leaks in PHP. Compared with writing business features, I much prefer fixing bugs.

I introduced canal and got a much better understanding of the binlog. It worked really well, although canal has plenty of pitfalls.

Those parts were genuinely good, but everything else has been wasted time. I really don't enjoy writing front-end code; why does PHP work always have to include the front end?

I can't solve algorithm problems at all, truly not at all. Only recently did I understand loop invariants, Hoare logic, and a little mathematical logic.

As for databases, I only recently sorted out how isolation levels, transactions, and MVCC relate to each other; three and a half years is far too long. It also took me a year to figure out exactly when PHP closes its connections.

As for Redis, I don't really understand it; I only know the simple set aaa bbb style of usage.

As for Elasticsearch, I know a bit about tokenization from an NLP-related project, but I have still never used ES itself.

So: I can use PHP yet still write bugs every day, there is a lot of MySQL I cannot use, and on the networking side I have barely used long-lived connections. These three and a half years feel rather like a failure.

The happy part

The happiest part is that I have started finding and reading the relevant papers, even though it is of no use at work.

Many things are no longer black boxes to me, even though that is of no use in interviews.

The one thing I know

Only mathematics is the one and only tool that can provide correctness; practice is not what really delivers it.

diff --git "a/2021/04/14/c99\346\237\224\346\200\247\346\225\260\347\273\204/index.html" "b/2021/04/14/c99\346\237\224\346\200\247\346\225\260\347\273\204/index.html"
new file mode 100644
index 0000000000..8450d607f3
(new HTML page, boilerplate omitted; page title: c99柔性数组 | dinosaur)

C99 flexible array members (c99 柔性数组)
As a special case, the last element of a structure with more than one named member may
have an incomplete array type; this is called a flexible array member. With two
exceptions, the flexible array member is ignored. First, the size of the structure shall be
equal to the offset of the last element of an otherwise identical structure that replaces the
flexible array member with an array of unspecified length.106) Second, when a . (or ->)
operator has a left operand that is (a pointer to) a structure with a flexible array member
and the right operand names that member, it behaves as if that member were replaced
with the longest array (with the same element type) that would not make the structure
larger than the object being accessed; the offset of the array shall remain that of the
flexible array member, even if this would differ from that of the replacement array. If this
array would have no elements, it behaves as if it had one element but the behavior is
undefined if any attempt is made to access that element or to generate a pointer one past
it.
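A minimal sketch (my own, not from the post) of the usual way a flexible array member is used: allocate the header and the trailing elements in a single block.

#include <stdlib.h>
#include <string.h>

struct message {
    size_t len;
    char   data[];          /* flexible array member: must be the last member */
};

/* Allocate the header and the trailing bytes in one malloc() call. */
struct message *message_new(const char *src, size_t len) {
    struct message *m = malloc(sizeof *m + len);
    if (m == NULL)
        return NULL;
    m->len = len;
    memcpy(m->data, src, len);
    return m;
}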

diff --git "a/2021/04/14/mysql\344\270\273\344\273\216/index.html" "b/2021/04/14/mysql\344\270\273\344\273\216/index.html"
new file mode 100644
index 0000000000..9c4243be9d
(new HTML page, boilerplate omitted; page title: mysql主从 | dinosaur)

MySQL master/slave (mysql 主从)
DBUG_PRINT("info", ("Creating new slave thread"));
if (mysql_thread_create(thread_key, &th, &connection_attrib, h_func,
(void *)mi)) {
LogErr(ERROR_LEVEL, ER_RPL_CANT_CREATE_SLAVE_THREAD,
mi->get_for_channel_str());
my_error(ER_SLAVE_THREAD, MYF(0));
goto err;
}
The slave thread:
/**
Slave SQL thread entry point.

@param arg Pointer to Relay_log_info object that holds information
for the SQL thread.

@return Always 0.
*/
extern "C" void *handle_slave_sql(void *arg) {
THD *thd; /* needs to be first for thread_stack */
bool thd_added = false;
bool main_loop_error = false;
char llbuff[22], llbuff1[22];
diff --git "a/2021/04/28/mysql\346\217\241\346\211\213/index.html" "b/2021/04/28/mysql\346\217\241\346\211\213/index.html"
new file mode 100644
index 0000000000..888b7c8ffb
(new HTML page, boilerplate omitted; page title: mysql握手 | dinosaur)

MySQL handshake (mysql 握手)

diff --git "a/2021/05/11/c\345\255\227\350\212\202\345\257\271\351\275\220/index.html" "b/2021/05/11/c\345\255\227\350\212\202\345\257\271\351\275\220/index.html"
new file mode 100644
index 0000000000..41301dad0a
(new HTML page, boilerplate omitted; page title: c字节对齐 | dinosaur)

C byte alignment (c 字节对齐)

__attribute__((packed))

You will see this GCC extension attribute in Redis's sds.h. What is it for? It packs the struct so that its fields occupy the minimum possible space.

This attribute, attached to an enum, struct, or union type definition, specifies that the minimum required memory be used to represent the type.
Specifying this attribute for struct and union types is equivalent to specifying the packed attribute on each of the structure or union members. Specifying the -fshort-enums flag on the command line is equivalent to specifying the packed attribute on all enum definitions.

You may only specify this attribute after a closing curly brace on an enum definition, not in a typedef declaration, unless that declaration also contains the definition of the enum.
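A rough illustration of what packing does to a struct's size; the struct names are mine, and the sizeof values in the comments are what you would typically see on x86-64 with gcc, not guarantees.

#include <stdint.h>
#include <stdio.h>

struct plain {                                /* natural alignment: 3 bytes of padding after c */
    char     c;
    uint32_t n;
};

struct __attribute__((__packed__)) packed {   /* no padding between members */
    char     c;
    uint32_t n;
};

int main(void) {
    /* on x86-64 with gcc this typically prints: plain=8 packed=5 */
    printf("plain=%zu packed=%zu\n", sizeof(struct plain), sizeof(struct packed));
    return 0;
}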
Related reading
diff --git "a/2021/05/11/mysql-binlog\350\216\267\345\217\226/index.html" "b/2021/05/11/mysql-binlog\350\216\267\345\217\226/index.html"
new file mode 100644
index 0000000000..354f907f8b
(new HTML page, boilerplate omitted; page title: mysql binlog获取 | dinosaur)

Fetching the MySQL binlog (mysql binlog 获取)

MySQL packets

A MySQL packet has two parts: a header and a payload.

header

The header is four bytes. The first three bytes encode the length of the payload, so a payload can be at most 2^24 - 1 bytes. The last byte is a sequence number, a bit like TCP's: it starts at 0 and increments with each packet, identifying which packet in the exchange this is.
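A small sketch of decoding those four header bytes; it only shows the arithmetic, everything else about the protocol is out of scope, and the function name is mine.

#include <stdint.h>

/* hdr points at the 4 bytes that precede every MySQL packet. */
static void parse_mysql_header(const uint8_t hdr[4],
                               uint32_t *payload_len, uint8_t *sequence_id) {
    /* 3-byte little-endian payload length: at most 2^24 - 1 bytes */
    *payload_len = (uint32_t)hdr[0]
                 | ((uint32_t)hdr[1] << 8)
                 | ((uint32_t)hdr[2] << 16);
    /* 1-byte sequence number: starts at 0 and increments per packet */
    *sequence_id = hdr[3];
}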

payload

The payload is the concrete message body.

MySQL handshake

After the TCP three-way handshake the transport-layer connection is established, so how does the client actually log in?
Handshake documentation
Authentication methods
For example, with the mysql_native_password plugin the first packet is sent by the server and carries a 20-byte random scramble; the client then hashes the user's password with several rounds of SHA-1 and sends the result back to MySQL.
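For reference, the mysql_native_password response described above is SHA1(password) XOR SHA1(scramble + SHA1(SHA1(password))). A sketch of that computation, where sha1() is a stand-in for any SHA-1 implementation (it is not a standard library function) and the function name is mine:

#include <stdint.h>
#include <string.h>

/* sha1(): hypothetical helper producing a 20-byte digest. */
void sha1(const uint8_t *data, size_t len, uint8_t out[20]);

/* response = SHA1(password) XOR SHA1(scramble + SHA1(SHA1(password))) */
static void native_password_response(const char *password,
                                     const uint8_t scramble[20],
                                     uint8_t out[20]) {
    uint8_t stage1[20], stage2[20], buf[40], mix[20];
    sha1((const uint8_t *)password, strlen(password), stage1);
    sha1(stage1, 20, stage2);
    memcpy(buf, scramble, 20);
    memcpy(buf + 20, stage2, 20);
    sha1(buf, 40, mix);
    for (int i = 0; i < 20; i++)
        out[i] = stage1[i] ^ mix[i];
}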

Fetching the binlog takes two steps:

1. COM_REGISTER_SLAVE: register this slave with the master.
2. COM_BINLOG_DUMP: tell the master which binlog file name and position you want; the master then streams a series of binlog events back to the client.
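A sketch of the COM_BINLOG_DUMP payload layout as documented in the MySQL client/server protocol (command byte 0x12, then position, flags, server id, and the binlog file name); the helper name is mine, and the 4-byte packet header and socket I/O are omitted.

#include <stdint.h>
#include <string.h>

/* Build the payload of a COM_BINLOG_DUMP packet: command byte 0x12,
   4-byte binlog position, 2-byte flags, 4-byte server id (all little-endian),
   followed by the binlog file name. Returns the payload length. */
static size_t build_binlog_dump(uint8_t *buf, uint32_t pos, uint16_t flags,
                                uint32_t server_id, const char *binlog_name) {
    size_t off = 0;
    buf[off++] = 0x12;                                        /* COM_BINLOG_DUMP */
    for (int i = 0; i < 4; i++) buf[off++] = (uint8_t)(pos >> (8 * i));
    for (int i = 0; i < 2; i++) buf[off++] = (uint8_t)(flags >> (8 * i));
    for (int i = 0; i < 4; i++) buf[off++] = (uint8_t)(server_id >> (8 * i));
    size_t n = strlen(binlog_name);
    memcpy(buf + off, binlog_name, n);
    return off + n;
}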

How MySQL sends the binlog:

int Binlog_sender::get_binlog_end_pos(File_reader *reader, my_off_t *end_pos) {
DBUG_TRACE;
my_off_t read_pos = reader->position();

do {
/*
MYSQL_BIN_LOG::binlog_end_pos is atomic. We should only acquire the
LOCK_binlog_end_pos if we reached the end of the hot log and are going
to wait for updates on the binary log (Binlog_sender::wait_new_event()).
*/
*end_pos = mysql_bin_log.get_binlog_end_pos();

/* If this is a cold binlog file, we are done getting the end pos */
if (unlikely(!mysql_bin_log.is_active(m_linfo.log_file_name))) {
*end_pos = 0;
return 0;
}

DBUG_PRINT("info", ("Reading file %s, seek pos %llu, end_pos is %llu",
m_linfo.log_file_name, read_pos, *end_pos));
DBUG_PRINT("info", ("Active file is %s", mysql_bin_log.get_log_fname()));

if (read_pos < *end_pos) return 0;

/* Some data may be in net buffer, it should be flushed before waiting */
if (!m_wait_new_events || flush_net()) return 1;

if (unlikely(wait_new_events(read_pos))) return 1;
} while (unlikely(!m_thd->killed));

return 1;
}
diff --git a/2021/05/21/vxlan/index.html b/2021/05/21/vxlan/index.html
new file mode 100644
index 0000000000..736c4171da
(new HTML page, boilerplate omitted; page title: vxlan | dinosaur)
diff --git "a/2021/05/24/\345\240\206\346\240\210/index.html" "b/2021/05/24/\345\240\206\346\240\210/index.html"
new file mode 100644
index 0000000000..76e45575b0
(new HTML page, boilerplate omitted; page title: 堆栈 | dinosaur)

The call stack (堆栈)

On x86_64 the call stack grows from high addresses toward low addresses, and the %rbp register gives you two key pieces of the current frame:

  • 0(%rbp): the memory %rbp points at holds the saved %rbp of the previous (caller's) frame.
  • 8(%rbp): the next slot holds the return address, that is, an address in the code segment to jump back to.

So from %rbp alone we can recover:

  • 8(%rbp): where execution resumes in the caller's code
  • (%rbp): the base of the caller's frame
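Given those two facts, a crude backtrace can be produced just by chasing the saved-%rbp chain. A minimal C sketch under the assumption that the code was built with frame pointers kept (e.g. -fno-omit-frame-pointer); the function name is my own.

#include <stdint.h>
#include <stdio.h>

/* Walk the %rbp chain: 0(%rbp) holds the caller's saved %rbp and
   8(%rbp) holds the return address into the caller's code. */
static void backtrace_rbp(int max_frames) {
    uintptr_t *rbp;
    __asm__ volatile("movq %%rbp, %0" : "=r"(rbp));
    for (int i = 0; i < max_frames && rbp != NULL; i++) {
        uintptr_t ret = rbp[1];               /* 8(%rbp): return address */
        if (ret == 0)
            break;
        printf("#%d return address: %#lx\n", i, (unsigned long)ret);
        rbp = (uintptr_t *)rbp[0];            /* 0(%rbp): previous frame's rbp */
    }
}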

Related reading
diff --git "a/2021/05/24/\351\234\215\345\260\224\351\200\273\350\276\221-\344\273\216\345\277\253\346\216\222\345\274\200\345\247\213/index.html" "b/2021/05/24/\351\234\215\345\260\224\351\200\273\350\276\221-\344\273\216\345\277\253\346\216\222\345\274\200\345\247\213/index.html"
new file mode 100644
index 0000000000..f78cd253ef
(new HTML page, boilerplate omitted; page title: 霍尔逻辑_从快排开始 | dinosaur)

Hoare logic, starting from quicksort (霍尔逻辑_从快排开始)

What is Hoare logic? A Hoare triple consists of three parts:

  • ϕ is called the precondition,
  • P is the program fragment,
  • ψ is called the postcondition.
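A standard textbook instance of such a triple (this example is mine, not from the original post), written {ϕ} P {ψ}:

    { x = N }   x := x + 1   { x = N + 1 }

If x equals some value N before the assignment runs, then x equals N + 1 afterwards. A partial-correctness specification for quicksort can be stated the same way: precondition "A is an array of comparable values", program "quicksort(A)", postcondition "A is sorted and is a permutation of its original contents".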

Related reading

  • https://arxiv.org/pdf/1211.4470.pdf
  • C. A. R. Hoare. Proof of correctness of data representations. Acta Inf., 1:271–281, 1972.
  • Bertrand Meyer. Object-oriented software construction. Prentice Hall, 2nd edition, 1997. First Ed.: 1988.
diff --git a/2021/06/04/mongoinsert/index.html b/2021/06/04/mongoinsert/index.html
new file mode 100644
index 0000000000..58127543c1
(new HTML page, boilerplate omitted; page title: mongoinsert | dinosaur)

mongoinsert

How does MongoDB's WiredTiger engine organize its key/value structures? I still haven't figured it out.

The stack trace:

Thread 41 "conn1" hit Breakpoint 5, __wt_btcur_insert (cbt=cbt@entry=0x555560bd2000) at src/third_party/wiredtiger/src/btree/bt_cursor.c:763
763 {
(gdb) bt
#0 __wt_btcur_insert (cbt=cbt@entry=0x555560bd2000) at src/third_party/wiredtiger/src/btree/bt_cursor.c:763
#1 0x0000555556d6149a in __curfile_insert (cursor=0x555560bd2000) at src/third_party/wiredtiger/src/cursor/cur_file.c:266
#2 0x0000555556cd3ef4 in mongo::wiredTigerCursorInsert (opCtx=opCtx@entry=0x555560af8180, cursor=cursor@entry=0x555560bd2000)
at src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp:39
#3 0x0000555556d131bd in mongo::WiredTigerRecordStore::_insertRecords (this=0x55555bfddc00, opCtx=0x555560af8180, records=<optimized out>, timestamps=0x55555c01a478, nRecords=1)
at src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp:1319
#4 0x0000555556d13ae7 in mongo::WiredTigerRecordStore::insertRecords (this=<optimized out>, opCtx=<optimized out>, records=<optimized out>, timestamps=...)
at /usr/include/c++/9/bits/stl_vector.h:915
#5 0x0000555557bf9972 in mongo::CollectionImpl::_insertDocuments (this=0x55555bfe1410, opCtx=0x555560af8180, begin=..., end=..., opDebug=0x555560b40a50, fromMigrate=false)
at /usr/include/c++/9/bits/unique_ptr.h:360
#6 0x0000555557bfa333 in mongo::CollectionImpl::insertDocuments (this=this@entry=0x55555bfe1410, opCtx=opCtx@entry=0x555560af8180, begin=begin@entry=
{stmtIds = std::vector of length 1, capacity 1 = {-1}, oplogSlot = {static kTermFieldName = {_data = 0x555559e64ab2 "t", _size = 1}, static kTimestampFieldName = {_data = 0x555559e5c074 "ts", _size = 2}, static kUninitializedTerm = -1, static kInitialTerm = 0, _timestamp = {static kAllowUnstableCheckpointsSentinel = {static kAllowUnstableCheckpointsSentinel = <same as static member of an already seen type>, i = 1, secs = 0}, i = 0, secs = 0}, _term = -1}, doc = {static kMinBSONLength = 5 '\005', static kEmptyObjectPrototype = "\005\000\000\000", _objdata = 0x55556036f92b "-", _ownedBuffer = {_buffer = {_holder = {px = 0x55556036f900}, static kHolderSize = 8}}}}, end=end@entry=
{stmtIds = std::vector of length -28, capacity 23456275625363 = {<error reading variable>, opDebug=0x555560b40a50, fromMigrate=false)
at src/mongo/db/catalog/collection_impl.cpp:663
#7 0x0000555557a0a1ae in mongo::write_ops_exec::(anonymous namespace)::insertDocuments (opCtx=0x555560af8180, collection=..., begin=..., end=
{stmtIds = std::vector of length -28, capacity 23456275625363 = {<error reading variable>, fromMigrate=<optimized out>) at /usr/include/c++/9/bits/stl_iterator.h:871
#8 0x0000555557a0a631 in mongo::write_ops_exec::(anonymous namespace)::<lambda()>::operator()(void) const (__closure=0x7fffe5962a30) at src/mongo/db/catalog_raii.h:151
#9 0x0000555557a0ac1b in mongo::writeConflictRetry<mongo::write_ops_exec::(anonymous namespace)::insertBatchAndHandleErrors(mongo::OperationContext*, const mongo::write_ops::InsertCommandRequest&, std::vector<mongo::InsertStatement>&, mongo::write_ops_exec::(anonymous namespace)::LastOpFixer*, mongo::write_ops_exec::WriteResult*, mongo::OperationSource)::<lambda()> > (f=..., ns=..., opStr=..., opCtx=0x555560af8180) at /usr/include/c++/9/bits/stl_iterator.h:806
#10 mongo::write_ops_exec::(anonymous namespace)::insertBatchAndHandleErrors (source=<optimized out>, out=<optimized out>, lastOpFixer=<optimized out>,
batch=std::vector of length 1, capacity 1 = {...}, wholeOp=..., opCtx=<optimized out>) at src/mongo/db/ops/write_ops_exec.cpp:502
#11 mongo::write_ops_exec::performInserts (opCtx=<optimized out>, opCtx@entry=0x555560af8180, wholeOp=..., source=@0x7fffe5962e00: mongo::kStandard)
at src/mongo/db/ops/write_ops_exec.cpp:655
#12 0x000055555791d28e in mongo::(anonymous namespace)::CmdInsert::Invocation::typedRun (this=0x555560ba0000, opCtx=0x555560af8180) at src/mongo/db/commands.h:1173
#13 0x000055555791e8a0 in mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::_callTypedRun (opCtx=<optimized out>, this=<optimized out>)
at src/mongo/db/commands.h:1255
#14 mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::_runImpl (reply=0x555561392000, opCtx=<optimized out>, this=<optimized out>)
at src/mongo/db/commands.h:1256
#15 mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::run (this=<optimized out>, opCtx=<optimized out>, reply=0x555561392000)
at src/mongo/db/commands.h:1261
#16 0x0000555558791662 in mongo::CommandHelpers::runCommandInvocation (opCtx=0x555560af8180, request=..., invocation=0x555560ba0000, response=0x555561392000)
at src/mongo/db/commands.cpp:200
#17 0x0000555558797d73 in mongo::CommandHelpers::<lambda()>::operator() (__closure=0x7fffe5963180) at src/mongo/db/commands.cpp:184
#18 mongo::makeReadyFutureWith<mongo::CommandHelpers::runCommandInvocation(std::shared_ptr<mongo::RequestExecutionContext>, std::shared_ptr<mongo::CommandInvocation>, mongo::transport::ServiceExecutor::ThreadingModel)::<lambda()> > (func=...) at src/mongo/util/future.h:1208
#19 mongo::CommandHelpers::runCommandInvocation (rec=std::shared_ptr<class mongo::RequestExecutionContext> (use count 11, weak count 0) = {...},
invocation=std::shared_ptr<class mongo::CommandInvocation> (use count 3, weak count 0) = {...}, threadingModel=<optimized out>) at src/mongo/db/commands.cpp:185
#20 0x0000555556c48367 in mongo::(anonymous namespace)::runCommandInvocation (rec=std::shared_ptr<class mongo::RequestExecutionContext> (empty) = {...},
invocation=std::shared_ptr<class mongo::CommandInvocation> (empty) = {...}) at /usr/include/c++/9/bits/shared_ptr_base.h:756
--Type <RET> for more, q to quit, c to continue without paging--
#21 0x0000555556c5c389 in mongo::(anonymous namespace)::InvokeCommand::<lambda()>::operator() (__closure=<optimized out>) at /usr/include/c++/9/bits/shared_ptr_base.h:756
#22 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::InvokeCommand::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
#23 mongo::(anonymous namespace)::InvokeCommand::run (this=0x55555c01a4a0) at src/mongo/db/service_entry_point_common.cpp:842
#24 mongo::(anonymous namespace)::RunCommandImpl::<lambda(auto:78*)>::operator()<mongo::(anonymous namespace)::InvokeCommand> (__closure=<optimized out>, path=0x55555c01a4a0)
at src/mongo/db/service_entry_point_common.cpp:1188
#25 mongo::future_util::AsyncState<mongo::(anonymous namespace)::InvokeCommand>::<lambda()>::operator() (this=<optimized out>, this=<optimized out>)
at src/mongo/util/future_util.h:742
#26 mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::RunCommandImpl::_runCommand()::<lambda(auto:78*)>; State = mongo::(anonymous namespace)::InvokeCommand]::<lambda()> > (func=...) at src/mongo/util/future.h:1211
#27 mongo::future_util::AsyncState<mongo::(anonymous namespace)::InvokeCommand>::thenWithState<mongo::(anonymous namespace)::RunCommandImpl::_runCommand()::<lambda(auto:78*)> > (
launcher=..., this=<optimized out>) at src/mongo/util/future_util.h:747
#28 mongo::(anonymous namespace)::RunCommandImpl::_runCommand (this=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:1188
#29 0x0000555556c5cc32 in mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern::_runCommandWithFailPoint (this=0x555560ba00e0)
at src/mongo/db/service_entry_point_common.cpp:1299
#30 0x0000555556c5d1c3 in mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern::_runImpl (this=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:1219
#31 0x0000555556c4b9ad in mongo::(anonymous namespace)::RunCommandImpl::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:730
#32 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::RunCommandImpl::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
#33 mongo::(anonymous namespace)::RunCommandImpl::run (this=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:728
#34 0x0000555556c4f5b2 in mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::<lambda(auto:79*)>::operator()<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern> (__closure=<optimized out>, runner=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:1651
#35 mongo::future_util::AsyncState<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern>::<lambda()>::operator() (this=<optimized out>, this=<optimized out>)
at src/mongo/util/future_util.h:742
#36 mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec()::<lambda()>::<lambda(auto:79*)>; State = mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern]::<lambda()> > (func=...) at src/mongo/util/future.h:1211
#37 mongo::future_util::AsyncState<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern>::thenWithState<mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec()::<lambda()>::<lambda(auto:79*)> > (launcher=..., this=<optimized out>) at src/mongo/util/future_util.h:747
#38 mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::operator() (__closure=<synthetic pointer>) at src/mongo/db/service_entry_point_common.cpp:1651
#39 mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec (this=0x555560b57800) at src/mongo/db/service_entry_point_common.cpp:1658
#40 0x0000555556c58516 in mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:625
#41 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::ExecCommandDatabase::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
#42 mongo::(anonymous namespace)::ExecCommandDatabase::run (this=0x555560b57800) at src/mongo/db/service_entry_point_common.cpp:623
#43 mongo::(anonymous namespace)::<lambda()>::<lambda(auto:81*)>::operator()<mongo::(anonymous namespace)::ExecCommandDatabase> (__closure=<optimized out>, runner=0x555560b57800)
at src/mongo/db/service_entry_point_common.cpp:1880
#44 mongo::future_util::AsyncState<mongo::(anonymous namespace)::ExecCommandDatabase>::<lambda()>::operator()(void) const (this=<optimized out>, this=<optimized out>)
at src/mongo/util/future_util.h:742
#45 0x0000555556c58be2 in mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> mutable::<lambda(auto:81*)>; State = mongo::(anonymous namespace)::ExecCommandDatabase]::<lambda()> > (func=...) at src/mongo/util/future.h:1206
#46 mongo::future_util::AsyncState<mongo::(anonymous namespace)::ExecCommandDatabase>::thenWithState<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> mutable::<lambda(auto:81*)> > (launcher=..., this=0x7fffe59639c0) at src/mongo/util/future_util.h:747
#47 mongo::(anonymous namespace)::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:1880
#48 mongo::future_details::call<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&> (func=...)
at src/mongo/util/future_impl.h:255
--Type <RET> for more, q to quit, c to continue without paging--
#49 mongo::future_details::throwingCall<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&, mongo::future_details::FakeVoid> (func=...) at src/mongo/util/future_impl.h:308
#50 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>::operator() (output=0x555560b43200, input=<optimized out>, this=<optimized out>) at src/mongo/util/future_impl.h:935
#51 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::SharedStateBase*)>::operator() (ssb=<optimized out>, this=<optimized out>)
at src/mongo/util/future_impl.h:1257
#52 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::callRegularVoid<mongo::future_details::FutureImpl<T>::makeContinuation(OnReady&&) [with Result = void; OnReady = mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::SharedStateBase*)> > (args#0=<optimized out>, f=..., isVoid=...)
at src/mongo/util/functional.h:145
#53 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::SpecificImpl::call(mongo::future_details::SharedStateBase *&&) (this=<optimized out>,
args#0=<optimized out>) at src/mongo/util/functional.h:159
#54 0x0000555556c14b27 in mongo::unique_function<void (mongo::future_details::SharedStateBase*)>::operator()(mongo::future_details::SharedStateBase*) const (args#0=<optimized out>,
this=0x555560b43818) at src/mongo/util/invariant.h:66
#55 mongo::future_details::SharedStateBase::transitionToFinished (this=0x555560b43800) at src/mongo/util/future_impl.h:441
#56 0x0000555556c5fcd2 in mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>::emplaceValue<mongo::future_details::FakeVoid> (this=<optimized out>)
at /usr/include/c++/9/new:174
#57 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}::operator()(mongo::future_details::FakeVoid&&) const (this=<optimized out>, val=...) at src/mongo/util/future_impl.h:1146
#58 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}>(mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}&&) (notReady=...,
fail=..., success=..., this=<optimized out>) at src/mongo/util/future_impl.h:1191
#59 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}>(mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}&&) (
this=<optimized out>, success=..., fail=..., notReady=...) at src/mongo/util/future_impl.h:1182
#60 0x0000555556c520ac in mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) && (output=<optimized out>, this=0x7fffe5963b40) at src/mongo/util/future_impl.h:1143
#61 mongo::SemiFuture<void>::propagateResultTo<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*&>(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*&) && (arg=<synthetic pointer>: <optimized out>, this=0x7fffe5963b40) at src/mongo/util/future.h:285
#62 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>::operator() (output=0x555560b43800, input=<optimized out>, this=<optimized out>) at src/mongo/util/future_impl.h:935
#63 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::SharedStateBase*)>::operator() (ssb=<optimized out>, this=<optimized out>)
--Type <RET> for more, q to quit, c to continue without paging--
at src/mongo/util/future_impl.h:1257
#64 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::callRegularVoid<mongo::future_details::FutureImpl<T>::makeContinuation(OnReady&&) [with Result = void; OnReady = mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::SharedStateBase*)> > (args#0=<optimized out>, f=..., isVoid=...)
at src/mongo/util/functional.h:145
#65 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::SpecificImpl::call(mongo::future_details::SharedStateBase *&&) (this=<optimized out>,
args#0=<optimized out>) at src/mongo/util/functional.h:159
#66 0x0000555556c14b27 in mongo::unique_function<void (mongo::future_details::SharedStateBase*)>::operator()(mongo::future_details::SharedStateBase*) const (args#0=<optimized out>,
this=0x555560b42d18) at src/mongo/util/invariant.h:66
#67 mongo::future_details::SharedStateBase::transitionToFinished (this=0x555560b42d00) at src/mongo/util/future_impl.h:441
#68 0x0000555556c59141 in mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>::emplaceValue<>() (this=0x555560b42d00) at /usr/include/c++/9/new:174
#69 mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}::operator()(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&) const (this=<optimized out>, sharedState=<synthetic pointer>) at src/mongo/util/future.h:854
#70 mongo::Promise<void>::setImpl<mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}>(mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}&&) (doSet=...,
this=0x7fffe5963ce0) at src/mongo/util/future.h:895
#71 mongo::Promise<void>::emplaceValue<, 0>() (this=0x7fffe5963ce0) at src/mongo/util/future.h:853
#72 mongo::(anonymous namespace)::executeCommand (execContext=...) at src/mongo/db/service_entry_point_common.cpp:1892
#73 0x0000555556c59cbf in mongo::(anonymous namespace)::<lambda()>::operator() (__closure=<optimized out>) at /usr/include/c++/9/bits/shared_ptr_base.h:756
#74 mongo::future_details::call<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&> (
func=...) at src/mongo/util/future_impl.h:255
#75 mongo::future_details::throwingCall<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&, mongo::future_details::FakeVoid> (func=...) at src/mongo/util/future_impl.h:308
#76 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::FakeVoid&&)>::operator()(mongo::future_details::FakeVoid &&) (val=...,
this=<optimized out>) at src/mongo/util/future_impl.h:917
#77 0x0000555556c59e8a in mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::FakeVoid&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::Status&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()> > (fail=..., notReady=..., success=..., this=0x7fffe5964110) at src/third_party/boost/boost/optional/detail/optional_aligned_storage.hpp:64
#78 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::then<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> > (func=..., this=0x7fffe5964110) at src/mongo/util/future_impl.h:940
#79 mongo::Future<void>::then<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> > (
func=..., this=0x7fffe5964110) at src/mongo/util/future.h:405
#80 mongo::(anonymous namespace)::receivedCommands (
execContext=std::shared_ptr<class mongo::(anonymous namespace)::HandleRequest::ExecutionContext> (use count 11, weak count 0) = {...})
at src/mongo/db/service_entry_point_common.cpp:1939
#81 0x0000555556c5b130 in mongo::(anonymous namespace)::CommandOpRunner::run (this=<optimized out>) at /usr/include/c++/9/ext/atomicity.h:96
#82 0x0000555556c54f9f in mongo::ServiceEntryPointCommon::handleRequest (opCtx=opCtx@entry=0x555560af8180, m=...,
behaviors=std::unique_ptr<const class mongo::ServiceEntryPointCommon::Hooks> = {...}) at src/mongo/db/service_entry_point_common.cpp:2441
#83 0x0000555556c41514 in mongo::ServiceEntryPointMongod::handleRequest (this=<optimized out>, opCtx=0x555560af8180, m=...) at /usr/include/c++/9/bits/move.h:74
--Type <RET> for more, q to quit, c to continue without paging--
#84 0x0000555556cabe0a in mongo::transport::ServiceStateMachine::Impl::processMessage (this=0x555560b81090) at src/mongo/transport/service_state_machine.cpp:466
#85 0x0000555556caf176 in mongo::transport::ServiceStateMachine::Impl::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/transport/service_state_machine.cpp:559
#86 mongo::future_details::call<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>&> (func=...) at src/mongo/util/future_impl.h:255
#87 mongo::future_details::throwingCall<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>&, mongo::future_details::FakeVoid> (func=...)
at src/mongo/util/future_impl.h:308
#88 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::FakeVoid&&)>::operator() (this=<optimized out>, val=...)
at src/mongo/util/future_impl.h:917
#89 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::FakeVoid&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::Status&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()> > (fail=..., notReady=..., success=..., this=0x7fffe5964780) at src/mongo/util/future_impl.h:1184
#90 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::then<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()> > (
func=..., this=0x7fffe5964780) at src/mongo/util/future_impl.h:940
#91 mongo::Future<void>::then<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()> > (func=..., this=0x7fffe5964780)
at src/mongo/util/future.h:405
#92 mongo::transport::ServiceStateMachine::Impl::startNewLoop (execStatus=..., this=0x555560b81090) at src/mongo/transport/service_state_machine.cpp:559
#93 mongo::transport::ServiceStateMachine::Impl::startNewLoop (this=0x555560b81090, execStatus=...) at src/mongo/transport/service_state_machine.cpp:546
#94 0x0000555556caf8e4 in mongo::transport::ServiceStateMachine::Impl::<lambda(mongo::Status)>::<lambda(mongo::Status)>::<lambda()>::operator() (__closure=<synthetic pointer>,
__closure=<synthetic pointer>) at src/mongo/transport/service_state_machine.cpp:588
#95 mongo::ClientStrand::run<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda(mongo::Status)>::<lambda(mongo::Status)>::<lambda()> > (
task=..., this=<optimized out>) at src/mongo/db/client_strand.h:165
#96 mongo::transport::ServiceStateMachine::Impl::<lambda(mongo::Status)>::<lambda(mongo::Status)>::operator() (__closure=<optimized out>, execStatus=...)
at src/mongo/transport/service_state_machine.cpp:588
#97 mongo::unique_function<void(mongo::Status)>::callRegularVoid<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda(mongo::Status)>::<lambda(mongo::Status)> > (args#0=..., f=..., isVoid=...) at src/mongo/util/functional.h:145
#98 mongo::unique_function<void(mongo::Status)>::SpecificImpl::call(mongo::Status &&) (this=<optimized out>, args#0=...) at src/mongo/util/functional.h:159
#99 0x00005555592c9065 in mongo::unique_function<void (mongo::Status)>::operator()(mongo::Status) const (args#0=..., this=<optimized out>) at src/mongo/util/invariant.h:66
#100 mongo::transport::ServiceExecutorSynchronous::<lambda(mongo::Status)>::operator() (status=..., __closure=<optimized out>)
at src/mongo/transport/service_executor_synchronous.cpp:163
#101 mongo::unique_function<void(mongo::Status)>::callRegularVoid<mongo::transport::ServiceExecutorSynchronous::runOnDataAvailable(const SessionHandle&, mongo::OutOfLineExecutor::Task)::<lambda(mongo::Status)> > (args#0=..., f=..., isVoid=...) at src/mongo/util/functional.h:145
#102 mongo::unique_function<void(mongo::Status)>::SpecificImpl::call(mongo::Status &&) (this=<optimized out>, args#0=...) at src/mongo/util/functional.h:159
#103 0x000055555741b9d8 in mongo::unique_function<void (mongo::Status)>::operator()(mongo::Status) const (args#0=..., this=<optimized out>) at src/mongo/util/invariant.h:66
#104 mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}::operator()() (__closure=<optimized out>)
at src/mongo/transport/service_executor.h:111
#105 mongo::unique_function<void ()>::callRegularVoid<mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}>(std::integral_constant<bool, true>, mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}&) (f=..., isVoid=...) at src/mongo/util/functional.h:145
#106 mongo::unique_function<void ()>::makeImpl<mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}>(mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}&&)::SpecificImpl::call() (this=<optimized out>) at src/mongo/util/functional.h:159
#107 0x00005555592c923f in mongo::unique_function<void ()>::operator()() const (this=<optimized out>) at src/mongo/util/invariant.h:66
#108 mongo::transport::ServiceExecutorSynchronous::<lambda()>::operator() (__closure=0x55556037a1a8) at src/mongo/transport/service_executor_synchronous.cpp:131
#109 mongo::unique_function<void()>::callRegularVoid<mongo::transport::ServiceExecutorSynchronous::scheduleTask(mongo::transport::ServiceExecutor::Task, mongo::transport::ServiceExec--Type <RET> for more, q to quit, c to continue without paging--
utor::ScheduleFlags)::<lambda()> > (f=..., isVoid=...) at src/mongo/util/functional.h:145
#110 mongo::unique_function<void()>::SpecificImpl::call(void) (this=0x55556037a1a0) at src/mongo/util/functional.h:159
#111 0x00005555592cdc28 in mongo::unique_function<void ()>::operator()() const (this=0x555560b58f58) at src/mongo/util/invariant.h:66
#112 mongo::<lambda()>::operator() (__closure=0x555560b58f48) at src/mongo/transport/service_executor_utils.cpp:111
#113 mongo::unique_function<void()>::callRegularVoid<mongo::launchServiceWorkerThread(mongo::unique_function<void()>)::<lambda()> > (f=..., isVoid=...)
at src/mongo/util/functional.h:145
#114 mongo::unique_function<void()>::SpecificImpl::call(void) (this=0x555560b58f40) at src/mongo/util/functional.h:159
#115 0x00005555592cdca1 in mongo::unique_function<void ()>::operator()() const (this=0x55555c01a4b8) at src/mongo/util/invariant.h:66
#116 mongo::(anonymous namespace)::runFunc (ctx=0x55555c01a4b8) at src/mongo/transport/service_executor_utils.cpp:64
#117 0x00007ffff7b8a609 in start_thread (arg=<optimized out>) at pthread_create.c:477
#118 0x00007ffff777f293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95

Related reading
diff --git a/2021/06/22/skiplist/index.html b/2021/06/22/skiplist/index.html
new file mode 100644
index 0000000000..c736027abd
(new HTML page, boilerplate omitted; page title: skiplist | dinosaur)

skiplist

Skip lists (跳表)

Binary trees can be used for representing abstract data types such as dictionaries and ordered lists. They work well when the elements are inserted in a random order.

Some sequences of operations, such as inserting the elements in order, produce degenerate data structures that perform very poorly. If it were possible to randomly permute the list of items to be inserted, trees would work well with high probability for any input sequence. In most cases queries must be answered online, so randomly permuting the input is impractical. Balanced tree algorithms rearrange the tree as operations are performed to maintain certain balance conditions and assure good performance.

Skip lists are a probabilistic alternative to balanced trees. Skip lists are balanced by consulting a random number generator. Although skip lists have bad worst-case performance (for example, every inserted node happening to draw the same random level), no input sequence consistently produces the worst-case performance (much like quicksort when the pivot element is chosen randomly), because the levels come from a random source rather than from the input. It is very unlikely a skip list data structure will be significantly unbalanced (e.g., for a dictionary of more than 250 elements, the chance that a search will take more than three times the expected time is less than one in a million). Skip lists have balance properties similar to those of search trees built by random insertions, yet do not require insertions to be random. It is easier to balance a data structure probabilistically than to explicitly maintain the balance. For many applications, skip lists are a more natural representation than trees, and they lead to simpler algorithms. The simplicity of skip list algorithms makes them easier to implement and provides significant constant-factor speed improvements over balanced tree and self-adjusting tree algorithms. Skip lists are also very space efficient. They can easily be configured to require an average of 1⅓ pointers per element (or even less) and do not require balance or priority information to be stored with each node.

SKIP LISTS

We might need to examine every node of the list when searching a linked list (Figure 1a). If the list is stored in sorted order and every other node of the list also has a pointer to the node two ahead of it in the list (Figure 1b), we have to examine no more than ⌈n/2⌉ + 1 nodes (where n is the length of the list). Also giving every fourth node a pointer four ahead (Figure 1c) requires that no more than ⌈n/4⌉ + 2 nodes be examined. If every (2^i)th node has a pointer 2^i nodes ahead (Figure 1d), the number of nodes that must be examined can be reduced to ⌈log2 n⌉ while only doubling the number of pointers. This data structure could be used for fast searching, but insertion and deletion would be impractical.

A node that has k forward pointers is called a level k node. If every (2^i)th node has a pointer 2^i nodes ahead, then levels of nodes are distributed in a simple pattern: 50 percent are level 1, 25 percent are level 2, 12.5 percent are level 3 and so on. What would happen if the levels of nodes were chosen randomly, but in the same proportions (e.g., as in Figure 1e)? A node's ith forward pointer, instead of pointing 2^(i-1) nodes ahead, points to the next node of level i or higher. Insertions or deletions would require only local modifications; the level of a node, chosen randomly when the node is inserted, need never change. Some arrangements of levels would give poor execution times, but we will see that such arrangements are rare. Because these data structures are linked lists with extra pointers that skip over intermediate nodes, I named them skip lists.
Related reading
diff --git a/2021/07/15/tcp-nodelay/index.html b/2021/07/15/tcp-nodelay/index.html
new file mode 100644
index 0000000000..254f891f60
(new HTML page, boilerplate omitted; page title: tcp_nodelay | dinosaur)

tcp_nodelay

The text below is quoted from RFC 896, "Congestion Control in IP/TCP Internetworks":

The solution to the small-packet problem

Clearly an adaptive approach is desirable. One would expect a
proposal for an adaptive inter-packet time limit based on the
round-trip delay observed by TCP. While such a mechanism could
certainly be implemented, it is unnecessary. A simple and
elegant solution has been discovered.

The solution is to inhibit the sending of new TCP segments when
new outgoing data arrives from the user if any previously
transmitted data on the connection remains unacknowledged. This
inhibition is to be unconditional; no timers, tests for size of
data received, or other conditions are required. Implementation
typically requires one or two lines inside a TCP program.
In short: if any previously transmitted data on the connection has not yet been acknowledged, hold back new TCP segments. The inhibition is unconditional: no timers, no checks on how much data has accumulated, no other conditions. It typically takes only one or two lines inside a TCP implementation.


At first glance, this solution seems to imply drastic changes in
the behavior of TCP. This is not so. It all works out right in
the end. Let us see why this is so.
At first glance this seems to change TCP's behaviour drastically, but it does not; it all works out in the end. Let's see why.

When a user process writes to a TCP connection, TCP receives some
data. It may hold that data for future sending or may send a
packet immediately. If it refrains from sending now, it will
typically send the data later when an incoming packet arrives and
changes the state of the system. The state changes in one of two
ways; the incoming packet acknowledges old data the distant host
has received, or announces the availability of buffer space in
the distant host for new data. (This last is referred to as
"updating the window"). Each time data arrives on a connec-
tion, TCP must reexamine its current state and perhaps send some
packets out. Thus, when we omit sending data on arrival from the
user, we are simply deferring its transmission until the next
message arrives from the distant host. A message must always
arrive soon unless the connection was previously idle or communi-
cations with the other end have been lost. In the first case,
the idle connection, our scheme will result in a packet being
sent whenever the user writes to the TCP connection. Thus we do
not deadlock in the idle condition. In the second case, where
When a user process writes to a TCP connection, the TCP stack receives the data and either holds it for later or sends a packet immediately.




the distant host has failed, sending more data is futile anyway.
Note that we have done nothing to inhibit normal TCP retransmis-
sion logic, so lost messages are not a problem.

Examination of the behavior of this scheme under various condi-
tions demonstrates that the scheme does work in all cases. The
first case to examine is the one we wanted to solve, that of the
character-oriented Telnet connection. Let us suppose that the
user is sending TCP a new character every 200ms, and that the
connection is via an Ethernet with a round-trip time including
software processing of 50ms. Without any mechanism to prevent
small-packet congestion, one packet will be sent for each charac-
ter, and response will be optimal. Overhead will be 4000%, but
this is acceptable on an Ethernet. The classic timer scheme,
with a limit of 2 packets per second, will cause two or three
characters to be sent per packet. Response will thus be degraded
even though on a high-bandwidth Ethernet this is unnecessary.
Overhead will drop to 1500%, but on an Ethernet this is a bad
tradeoff. With our scheme, every character the user types will
find TCP with an idle connection, and the character will be sent
at once, just as in the no-control case. The user will see no
visible delay. Thus, our scheme performs as well as the no-
control scheme and provides better responsiveness than the timer
scheme.

The second case to examine is the same Telnet test but over a
long-haul link with a 5-second round trip time. Without any
mechanism to prevent small-packet congestion, 25 new packets
would be sent in 5 seconds.* Overhead here is 4000%. With the
classic timer scheme, and the same limit of 2 packets per second,
there would still be 10 packets outstanding and contributing to
congestion. Round-trip time will not be improved by sending many
packets, of course; in general it will be worse since the packets
will contend for line time. Overhead now drops to 1500%. With
our scheme, however, the first character from the user would find
an idle TCP connection and would be sent immediately. The next
24 characters, arriving from the user at 200ms intervals, would
be held pending a message from the distant host. When an ACK
arrived for the first packet at the end of 5 seconds, a single
packet with the 24 queued characters would be sent. Our scheme
thus results in an overhead reduction to 320% with no penalty in
response time. Response time will usually be improved with our
scheme because packet overhead is reduced, here by a factor of
4.7 over the classic timer scheme. Congestion will be reduced by
this factor and round-trip delay will decrease sharply. For this
________
* This problem is not seen in the pure ARPANET case because the
IMPs will block the host when the count of packets
outstanding becomes excessive, but in the case where a pure
datagram local net (such as an Ethernet) or a pure datagram
gateway (such as an ARPANET / MILNET gateway) is involved, it
is possible to have large numbers of tiny packets
outstanding.




case, our scheme has a striking advantage over either of the
other approaches.

We use our scheme for all TCP connections, not just Telnet con-
nections. Let us see what happens for a file transfer data con-
nection using our technique. The two extreme cases will again be
considered.

As before, we first consider the Ethernet case. The user is now
writing data to TCP in 512 byte blocks as fast as TCP will accept
them. The user's first write to TCP will start things going; our
first datagram will be 512+40 bytes or 552 bytes long. The
user's second write to TCP will not cause a send but will cause
the block to be buffered. Assume that the user fills up TCP's
outgoing buffer area before the first ACK comes back. Then when
the ACK comes in, all queued data up to the window size will be
sent. From then on, the window will be kept full, as each ACK
initiates a sending cycle and queued data is sent out. Thus,
after a one round-trip time initial period when only one block is
sent, our scheme settles down into a maximum-throughput condi-
tion. The delay in startup is only 50ms on the Ethernet, so the
startup transient is insignificant. All three schemes provide
equivalent performance for this case.

Finally, let us look at a file transfer over the 5-second round
trip time connection. Again, only one packet will be sent until
the first ACK comes back; the window will then be filled and kept
full. Since the round-trip time is 5 seconds, only 512 bytes of
data are transmitted in the first 5 seconds. Assuming a 2K win-
dow, once the first ACK comes in, 2K of data will be sent and a
steady rate of 2K per 5 seconds will be maintained thereafter.
Only for this case is our scheme inferior to the timer scheme,
and the difference is only in the startup transient; steady-state
throughput is identical. The naive scheme and the timer scheme
would both take 250 seconds to transmit a 100K byte file under
the above conditions and our scheme would take 254 seconds, a
difference of 1.6%.

Thus, for all cases examined, our scheme provides at least 98% of
the performance of both other schemes, and provides a dramatic
improvement in Telnet performance over paths with long round trip
times. We use our scheme in the Ford Aerospace Software
Engineering Network, and are able to run screen editors over Eth-
ernet and talk to distant TOPS-20 hosts with improved performance
in both cases.
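
The behaviour argued for above is exactly what the TCP_NODELAY socket option toggles. As a minimal, hedged sketch (host and port are placeholders, not from the RFC), this is how a Java application would turn the coalescing off for an interactive connection:

import java.io.OutputStream;
import java.net.Socket;

public class NoDelayDemo {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("example.com", 7)) { // placeholder host/port
            // true disables Nagle's algorithm: small writes go out immediately
            // instead of being held while an ACK is outstanding.
            socket.setTcpNoDelay(true);
            OutputStream out = socket.getOutputStream();
            out.write('x'); // one keystroke-sized write
            out.flush();
        }
    }
}

Leaving the option at its default (false) gives the RFC's coalescing behaviour; setting it to true trades extra small packets for lower per-keystroke latency.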
diff --git "a/2021/07/29/mysql-5-7-in-的优化引起的bug/index.html" "b/2021/07/29/mysql-5-7-in-的优化引起的bug/index.html"
new file mode 100644
index 0000000000..0794f3f45e
--- /dev/null
+++ "b/2021/07/29/mysql-5-7-in-的优化引起的bug/index.html"
@@ -0,0 +1,481 @@
A bug caused by MySQL 5.7's IN optimization
Background

We had a table of roughly 17 million rows to backfill, and we processed it in batches by id range: [1, 1000), [1000, 2000), and so on.

Problem

The SQL is very simple:

update test set value=1 where id in ( 1 , 2 , 7 , 9.... 1000);
update test set value=1 where id in ( 1001 , 1002 , 1005 , ... 2000);
+

Each IN list holds roughly 100 ids; the ids are monotonically increasing and mostly contiguous.

In the test environment this was perfectly fine and very fast: with this SQL we could update more than 10,000 rows per second.

In production, however, the update was extremely slow: updating 1,000 rows took more than 50 seconds.

Investigation

  • Narrowing it down: after a lot of trial and error we pinned the slowness on this UPDATE statement, and only in production; the test environment stayed fast.

Trying EXPLAIN:
explain 
update test as a set a.value=1 where id in ( 1 , 2 , 7 , 9....);
+

In production the plan shows:
Using where; Using temporary
+

But in the test environment it is:
Using where
+

Searching turned up a similar report: https://bugs.mysql.com/bug.php?id=80424
Comparing versions:
production: 5.7.9-log
test: 5.7.22-log

+

Check how the binlog records rows:
SELECT @@binlog_row_image

+

The result is:
FULL
+ +

The bug is fixed in 5.7.15 and later, which is why the test environment is fine while production is affected.

+

Fix

Upgrading the production MySQL was essentially out of the question, and at roughly 10 rows/s this backfill would have taken about a week, so we tried a number of rewrites to sidestep the optimization. In the end we replaced IN with a JOIN, which avoids the optimizer bug on the production version:
DESC
UPDATE `test` a JOIN (
SELECT id FROM test t WHERE `id` IN (516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,533,532)
) t ON a.id = t.id
SET a.isvisible = -1;
+ +
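
For completeness, a minimal JDBC sketch of how this can be driven from application code (the DataSource wiring and batch size are assumptions; the table and the isvisible column are the ones from the statement above; the ids are numeric, so plain string concatenation is acceptable here):

import java.sql.Connection;
import java.sql.Statement;
import java.util.List;
import java.util.stream.Collectors;
import javax.sql.DataSource;

public class BatchUpdater {
    // Issues the JOIN-form UPDATE for one batch of ids.
    static int updateBatch(DataSource ds, List<Long> ids) throws Exception {
        String idList = ids.stream().map(String::valueOf).collect(Collectors.joining(","));
        String sql =
            "UPDATE `test` a JOIN (" +
            "  SELECT id FROM test t WHERE `id` IN (" + idList + ")" +
            ") t ON a.id = t.id SET a.isvisible = -1";
        try (Connection conn = ds.getConnection(); Statement st = conn.createStatement()) {
            return st.executeUpdate(sql); // number of rows updated in this batch
        }
    }
}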

After the rewrite no temporary table is used:
"id"	"select_type"	"table"	"partitions"	"type"	   "possible_keys"	"key"	"key_len"	"ref"	"rows"	"filtered"	"Extra"
"1" "UPDATE" "a" \N "range" "PRIMARY" "PRIMARY" "4" \N "104" "100.00" "Using where"
"1" "SIMPLE" "b" \N "eq_ref" "PRIMARY" "PRIMARY" "4" "a.id" "1" "100.00" "Using index"
+

Digging into the source afterwards

The patch attached to https://bugs.mysql.com/bug.php?id=80424 roughly points to the cause.

+

Why is a temp table used?

+

The sixth argument decides whether a temp table is needed, namely !using_filesort && (used_key_is_modified || order):

Modification_plan plan(thd, MT_UPDATE, &qep_tab,
used_index, limit,
(!using_filesort && (used_key_is_modified || order)),
using_filesort, used_key_is_modified, rows);
+

Looking at the Modification_plan constructor:
Modification_plan(THD *thd_arg,
enum_mod_type mt, QEP_TAB *qep_tab,
uint key_arg, ha_rows limit_arg, bool need_tmp_table_arg,
bool need_sort_arg, bool used_key_is_modified_arg,
ha_rows rows);
+ +

In our case used_key_is_modified is true, so a temp table is produced.

diff --git "a/2021/08/09/redis-主从切换和高可用/index.html" "b/2021/08/09/redis-主从切换和高可用/index.html"
new file mode 100644
index 0000000000..ea6e9b2101
--- /dev/null
+++ "b/2021/08/09/redis-主从切换和高可用/index.html"
@@ -0,0 +1,439 @@
redis master-replica switchover and high availability
diff --git a/2021/09/25/redis/index.html b/2021/09/25/redis/index.html
new file mode 100644
index 0000000000..6ddbd8f81d
--- /dev/null
+++ b/2021/09/25/redis/index.html
@@ -0,0 +1,476 @@
redis

1 Data types

string
list
set
zset
hash

+

2 Commands and underlying structures

The SET command lives in redis/src/t_string.c; string expiration is not applied in real time.

+

list: LPUSH / LPOP; implemented with a ziplist and a doubly linked list.

+

hash: holds two hash tables, one for the data and one used while growing/rehashing.

+

set: made up of an intset and a hashtable.

+

zset: ziplist + skiplist.

+
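
To make the five types concrete, here is a minimal sketch using the Jedis client (an assumption: Jedis on the classpath and a local Redis on the default port):

import redis.clients.jedis.Jedis;

public class TypesDemo {
    public static void main(String[] args) {
        try (Jedis jedis = new Jedis("localhost", 6379)) {
            jedis.set("s", "hello");           // string
            jedis.lpush("l", "a", "b");        // list
            jedis.sadd("st", "x", "y");        // set
            jedis.zadd("z", 1.0, "member");    // zset (sorted set, score + member)
            jedis.hset("h", "field", "value"); // hash
        }
    }
}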

3 AOF / RDB

AOF (append-only file) is human-readable text; RDB is a snapshot of the whole dataset.

+

4 Eviction policies

noeviction: if memory use exceeds the maxmemory limit and the command being executed allocates memory (most write commands; DEL and a few others are exceptions), return an error to the client.
allkeys-lru: apply LRU eviction across all keys.
volatile-lru: apply LRU eviction only to keys that have an expiration set.
allkeys-random: evict random keys among all keys.
volatile-random: evict random keys among those with an expiration set.
volatile-ttl: evict only keys with an expiration set, preferring the ones with the smallest remaining TTL (time to live).

5 HA

Redis Cluster master-replica model

+

In order to remain available when a subset of master nodes are failing or are not able to communicate with the majority of nodes, Redis Cluster uses a master-replica model where every hash slot has from 1 (the master itself) to N replicas (N-1 additional replica nodes).

+

In our example cluster with nodes A, B, C, if node B fails the cluster is not able to continue, since we no longer have a way to serve hash slots in the range 5501-11000.

+

However when the cluster is created (or at a later time) we add a replica node to every master, so that the final cluster is composed of A, B, C that are master nodes, and A1, B1, C1 that are replica nodes. This way, the system is able to continue if node B fails.

+

Node B1 replicates B, and B fails, the cluster will promote node B1 as the new master and will continue to operate correctly.

+

However, note that if nodes B and B1 fail at the same time, Redis Cluster is not able to continue to operate.

+

Failover

When a master loses its connection, a replica initiates a Paxos-style vote with the other masters to take over.

+

Sharding

Keys are routed to different slots; the slot is computed as crc16(key) % 16384.

Each shard owns a specific set of slots.

+
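
A minimal sketch of that slot computation (assuming the CRC16-CCITT/XMODEM variant described in the cluster spec; hash tags are ignored here):

import java.nio.charset.StandardCharsets;

public class Slot {
    // CRC16-CCITT (XMODEM): polynomial 0x1021, initial value 0x0000.
    static int crc16(byte[] data) {
        int crc = 0;
        for (byte b : data) {
            crc ^= (b & 0xFF) << 8;
            for (int i = 0; i < 8; i++) {
                crc = ((crc & 0x8000) != 0) ? ((crc << 1) ^ 0x1021) : (crc << 1);
                crc &= 0xFFFF;
            }
        }
        return crc;
    }

    static int slotFor(String key) {
        return crc16(key.getBytes(StandardCharsets.UTF_8)) % 16384;
    }

    public static void main(String[] args) {
        System.out.println(slotFor("user:1000")); // which of the 16384 slots this key lands in
    }
}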

Consistency

Redis Cluster is not strongly consistent.

+

Redis Cluster is not able to guarantee strong consistency. In practical terms this means that under certain conditions it is possible that Redis Cluster will lose writes that were acknowledged by the system to the client.

+

The first reason why Redis Cluster can lose writes is because it uses asynchronous replication. This means that during writes the following happens:

+
  • Your client writes to the master B.
  • The master B replies OK to your client.
  • The master B propagates the write to its replicas B1, B2 and B3.

Writes can be lost because the replication is asynchronous.

+

Cache penetration and cache avalanche

Penetration: requests for missing keys pass straight through Redis to the backing store.
Avalanche: a large number of cached keys expire at the same time.

+

Rate limiting

https://segmentfault.com/a/1190000040570911

diff --git a/2021/09/26/redis-cluster/index.html b/2021/09/26/redis-cluster/index.html
new file mode 100644
index 0000000000..34cc968d81
--- /dev/null
+++ b/2021/09/26/redis-cluster/index.html
@@ -0,0 +1,457 @@
redis cluster

Getting the code:
git clone https://github.com/redis/redis.git
cd redis/
## build with debugging info
make CFLAGS="-g -O0"
## create a working directory
mkdir rediscluster
mkdir 7000 7001 7002 7003 7004 7005

+

Create a config directory for each node:
tree
.
├── 7000
├── 7001
├── 7002
├── 7003
├── 7004
└── 7005

+ +

Then start the six nodes like this:
src/redis-server rediscluster/7001/redis.conf

Manually stepping a master down is a single command:
CLUSTER FAILOVER
diff --git "a/2021/10/10/时间轮算法/index.html" "b/2021/10/10/时间轮算法/index.html"
new file mode 100644
index 0000000000..30926555a7
--- /dev/null
+++ "b/2021/10/10/时间轮算法/index.html"
@@ -0,0 +1,442 @@
The timing wheel algorithm

Hashed and Hierarchical Timing Wheels: Data Structures for the Efficient Implementation of a Timer Facility

+

Conventional algorithms to implement an Operating System timer module take O(n) time to start or maintain a timer, where n is the number of outstanding timers: this is expensive for large n. This paper begins by exploring the relationship between timer algorithms, time flow mechanisms used in discrete event simulations, and sorting techniques. Next a timer algorithm for small timer intervals is presented that is similar to the timing wheel technique used in logic simulators. By using a circular buffer or timing wheel, it takes O(1) time to start, stop, and maintain timers within the range of the wheel.
Two extensions for larger values of the interval are described. In the first, the timer interval is hashed into a slot on the timing wheel. In the second, a hierarchy of timing wheels with different granularities is used to span a greater range of intervals. The performance of these two schemes and various implementation trade-offs are discussed.

In short: a conventional OS timer module costs O(n) to start or maintain a timer, which gets expensive for large n. The paper relates timer algorithms to the time-flow mechanisms of discrete-event simulation and to sorting, then presents a timing-wheel algorithm (as used in logic simulators) that starts, stops, and maintains timers in O(1) within the wheel's range. Two extensions cover larger intervals: hashing the interval onto a slot of the wheel, and a hierarchy of wheels with different granularities; their performance and implementation trade-offs are discussed.

+

Our model of a timer module has four component
routines:
START_TIMER(Interval, Request_ID, Expiry_
Action): The client calls this routine to start a timer
that will expire after “Interval” units of time. The
client supplies a Request_ID which is used to distinguish this timer from other timers that the client has
outstanding. Finally, the client can specify what action must be taken on expiry: for instance, calling a
client-specified routine, or setting an event flag.
STOP_TIMER(Request_ID): This routine uses its
knowledge of the client and Request_ID to locate the
timer and stop it.
PER_TICK_BOOKKEEPING: Let the granularity of
the timer be T units. Then every T units this routine
checks whether any outstanding timers have expired;
if so, it calls STOP_TIMER, which in turn calls the
next routine.
EXPIRY_PROCESSING: This routine does the Expiry_Action specified in the START_TIMER call.
The first two routines are activated on client calls
while the last two are invoked on timer ticks. The
timer is often an external hardware clock.
The following two performance measures can be used
to choose between the various algorithms described
in the rest of this paper. Both of them are parameterized by n, the average (or worst-case) number of
outstanding timers.
Our timer module has four component routines:
START_TIMER(Interval, Request_ID, Expiry_Action): the client calls this routine to start (register) a timer that will expire after Interval units of time.

+
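
To make the O(1) claim concrete, here is a minimal single-level timing wheel sketch (bucket count and tick length are arbitrary choices; it only handles intervals shorter than one full revolution, and STOP_TIMER would need a handle to the queued entry, omitted here):

import java.util.ArrayDeque;
import java.util.Deque;

public class TimingWheel {
    private final Deque<Runnable>[] buckets;
    private int cursor = 0; // advanced once per tick by PER_TICK_BOOKKEEPING

    @SuppressWarnings("unchecked")
    public TimingWheel(int size) {
        buckets = new Deque[size];
        for (int i = 0; i < size; i++) buckets[i] = new ArrayDeque<>();
    }

    // START_TIMER: O(1) - drop the action into the bucket `ticks` ahead of the cursor.
    public void startTimer(int ticks, Runnable expiryAction) {
        if (ticks <= 0 || ticks >= buckets.length)
            throw new IllegalArgumentException("interval outside the wheel's range");
        buckets[(cursor + ticks) % buckets.length].add(expiryAction);
    }

    // PER_TICK_BOOKKEEPING: advance the cursor and run everything that just expired.
    public void tick() {
        cursor = (cursor + 1) % buckets.length;
        Deque<Runnable> expired = buckets[cursor];
        while (!expired.isEmpty()) expired.poll().run(); // EXPIRY_PROCESSING
    }
}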

diff --git a/2021/10/15/rabbitmq-ack-reject/index.html b/2021/10/15/rabbitmq-ack-reject/index.html
new file mode 100644
index 0000000000..2549d92c8a
--- /dev/null
+++ b/2021/10/15/rabbitmq-ack-reject/index.html
@@ -0,0 +1,445 @@
diff --git a/2021/10/18/llvm/index.html b/2021/10/18/llvm/index.html
new file mode 100644
index 0000000000..4d33ab9789
--- /dev/null
+++ b/2021/10/18/llvm/index.html
@@ -0,0 +1,440 @@
diff --git "a/2021/10/21/lucence源码分析/index.html" "b/2021/10/21/lucence源码分析/index.html"
new file mode 100644
index 0000000000..4f7a5a7278
--- /dev/null
+++ "b/2021/10/21/lucence源码分析/index.html"
@@ -0,0 +1,690 @@
Lucene source code analysis

Lucene breaks down into two parts:

  • Writing: index data is written out to the file system.
  • Querying: tokenization, scoring and top-k selection produce matching docids, and the docids are then used to look up the stored content.

VInt

A VInt is a variable-length integer encoding, little-endian in the sense that the low-order groups come first: the high bit of each byte is set when more bytes follow, so the last byte has its high bit clear.
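
A minimal sketch of that encoding (not Lucene's own DataOutput/DataInput code, just the same scheme):

import java.io.ByteArrayOutputStream;

public class VIntDemo {
    // Low 7 bits per byte, least-significant group first; high bit set means "more bytes follow".
    static byte[] writeVInt(int value) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        while ((value & ~0x7F) != 0) {
            out.write((value & 0x7F) | 0x80);
            value >>>= 7;
        }
        out.write(value); // last byte: high bit clear
        return out.toByteArray();
    }

    static int readVInt(byte[] bytes) {
        int value = 0, shift = 0;
        for (byte b : bytes) {
            value |= (b & 0x7F) << shift;
            if ((b & 0x80) == 0) break;
            shift += 7;
        }
        return value;
    }

    public static void main(String[] args) {
        System.out.println(readVInt(writeVInt(300))); // prints 300; encoded as 0xAC 0x02
    }
}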

Relevant setup code:

IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
iwc.setUseCompoundFile(false); // write separate index files instead of a compound file

Starting the debugger

### debug the Java program

java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp ./lucene-demo-9.1.0-SNAPSHOT.jar:/home/ubuntu/lucene-9.1.0/lucene/core/build/libs/lucene-core-9.1.0-SNAPSHOT.jar:/home/ubuntu/lucene-9.1.0/lucene/queryparser/build/libs/lucene-queryparser-9.1.0-SNAPSHOT.jar org.apache.lucene.demo.SearchFiles

### attach jdb to the JVM
jdb -attach 8000 -sourcepath /home/ubuntu/lucene-9.1.0/lucene/demo/src/java/
+ +

Inspecting the .fdt file:
hexdump -C _0.fdt
00000000 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f |?.l..Lucene90Sto|
00000010 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74 |redFieldsFastDat|
00000020 61 00 00 00 01 85 88 12 2b 0c 73 6b 95 30 38 76 |a.......+.sk.08v|
00000030 c9 0a 2a 52 29 00 00 0a 00 01 00 1c 02 06 03 07 |..*R)...........|
00000040 07 07 07 07 07 07 07 07 20 00 1a 60 2f 68 6f 6d |........ ..`/hom|
00000050 65 2f 60 75 62 75 6e 74 75 60 2f 64 6f 63 2f 6d |e/`ubuntu`/doc/m|
00000060 60 6f 6e 67 6f 2e 74 60 78 74 00 1a 2f 68 60 6f |`ongo.t`xt../h`o|
00000070 6d 65 2f 75 62 60 75 6e 74 75 2f 64 60 6f 63 2f |me/ub`untu/d`oc/|
00000080 68 65 6c 60 6c 6f 2e 74 78 74 c0 28 93 e8 00 00 |hel`lo.txt.(....|
00000090 00 00 00 00 00 00 c8 75 0a 41 |.......u.A|
0000009a
+

writeField

ubuntu@VM-0-3-ubuntu:~$ jdb -attach 8000 -sourcepath /home/ubuntu/lucene-9.1.0/lucene/demo/src/java/:/home/ubuntu/lucene-9.1.0/lucene/core/src/java/ 
Set uncaught java.lang.Throwable
Set deferred uncaught java.lang.Throwable
Initializing jdb ...
>
VM Started: No frames on the current call stack

main[1] stop in org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField
Deferring breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField.
It will be set after the class is loaded.
main[1] cont
> Set deferred breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField

Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField(), line=276 bci=0
276 ++numStoredFieldsInDoc;

main[1] wheree^H^H
Unrecognized command: 'wher'. Try help...
main[1] where
[1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField (Lucene90CompressingStoredFieldsWriter.java:276)
[2] org.apache.lucene.index.StoredFieldsConsumer.writeField (StoredFieldsConsumer.java:65)
[3] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:749)
[4] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[5] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[6] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[7] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[8] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[9] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
[10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
[11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[13] java.nio.file.Files.walkFileTree (Files.java:2,725)
[14] java.nio.file.Files.walkFileTree (Files.java:2,797)
[15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)
main[1] list
272
273 @Override
274 public void writeField(FieldInfo info, IndexableField field) throws IOException {
275
276 => ++numStoredFieldsInDoc;
277
278 int bits = 0;
279 final BytesRef bytes;
280 final String string;
281
main[1] print field
field = "stored,indexed,omitNorms,indexOptions=DOCS<path:/home/ubuntu/doc/mongo.txt>"
main[1] print info
info = "org.apache.lucene.index.FieldInfo@32464a14"

+ +

Tokenization and the inverted index
main[1] where
[1] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,138)
[2] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
[3] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[4] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[5] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[6] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[7] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[8] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
[9] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[10] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[11] java.nio.file.Files.walkFileTree (Files.java:2,725)
[12] java.nio.file.Files.walkFileTree (Files.java:2,797)
[13] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[14] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)
+ +

How terms are described in memory
IntBlockPool intPool,
ByteBlockPool bytePool,
ByteBlockPool termBytePool,
+ +

An inverted-index term is described in memory by the structures above. intPool holds three things:

  • a two-dimensional array buffers[][]
  • int bufferUpto: the first-level index into buffers[][]; it is typically used as int[] buff = buffers[bufferUpto + offset]
  • int intUpto: the overall offset, counted across all of the buffers
  • int intOffset: the offset of the header buffer

So what do the values buffers[xxx][yyy] hold? They are offsets as well. Offsets into what?

intPool stores offsets into bytePool and termBytePool.

+
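
A toy sketch of that indirection (these are not Lucene's classes, just an illustration of an int pool whose entries point into a byte pool):

public class PoolsDemo {
    public static void main(String[] args) {
        byte[][] byteBuffers = new byte[4][16]; // a tiny stand-in for bytePool
        int[][] intBuffers = new int[4][8];     // a tiny stand-in for intPool

        int byteOffset = 5;   // suppose a term's bytes start at offset 5 of byte buffer 0
        int bufferUpto = 0;   // first-level index into intBuffers
        int intUpto = 0;      // next free slot in the int pool

        // The int pool does not hold the term data itself; it records where the data lives.
        intBuffers[bufferUpto][intUpto] = byteOffset;

        // Later, the term's bytes are found by following the recorded offset.
        byte firstByte = byteBuffers[0][intBuffers[bufferUpto][intUpto]];
        System.out.println(firstByte);
    }
}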

Writing terms to the .tim file

Terms are written out one by one:
main[1] where 
[1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.writeBlock (Lucene90BlockTreeTermsWriter.java:963)
[2] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.writeBlocks (Lucene90BlockTreeTermsWriter.java:709)
[3] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.finish (Lucene90BlockTreeTermsWriter.java:1,105)
[4] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter.write (Lucene90BlockTreeTermsWriter.java:370)
[5] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsWriter.write (PerFieldPostingsFormat.java:171)
[6] org.apache.lucene.index.FreqProxTermsWriter.flush (FreqProxTermsWriter.java:131)
[7] org.apache.lucene.index.IndexingChain.flush (IndexingChain.java:300)
[8] org.apache.lucene.index.DocumentsWriterPerThread.flush (DocumentsWriterPerThread.java:391)
[9] org.apache.lucene.index.DocumentsWriter.doFlush (DocumentsWriter.java:493)
[10] org.apache.lucene.index.DocumentsWriter.flushAllThreads (DocumentsWriter.java:672)
[11] org.apache.lucene.index.IndexWriter.doFlush (IndexWriter.java:4,014)
[12] org.apache.lucene.index.IndexWriter.flush (IndexWriter.java:3,988)
[13] org.apache.lucene.index.IndexWriter.shutdown (IndexWriter.java:1,321)
[14] org.apache.lucene.index.IndexWriter.close (IndexWriter.java:1,361)
[15] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:166)

+ + +

Querying
main[1] where
[1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector.getLeafCollector (TopScoreDocCollector.java:57)
[2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:759)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[5] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+ +

Fetching a term

The term is read from the terms reader:
main[1] print fieldMap.get(field)
fieldMap.get(field) = "BlockTreeTerms(seg=_j terms=18,postings=20,positions=25,docs=2)"
main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader.terms (Lucene90BlockTreeTermsReader.java:294)
[2] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.terms (PerFieldPostingsFormat.java:353)
[3] org.apache.lucene.index.CodecReader.terms (CodecReader.java:114)
[4] org.apache.lucene.index.Terms.getTerms (Terms.java:41)
[5] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:115)
[6] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[7] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[8] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


+ + +

Looking up an arc's output
Breakpoint hit: "thread=main", org.apache.lucene.util.fst.FST.findTargetArc(), line=1,412 bci=0
1,412 if (labelToMatch == END_LABEL) {

main[1] where
[1] org.apache.lucene.util.fst.FST.findTargetArc (FST.java:1,412)
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:511)
[3] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
[4] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+ +

Opening the .tim file
main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader.<init> (Lucene90BlockTreeTermsReader.java:135)
[2] org.apache.lucene.codecs.lucene90.Lucene90PostingsFormat.fieldsProducer (Lucene90PostingsFormat.java:427)
[3] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.<init> (PerFieldPostingsFormat.java:329)
[4] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat.fieldsProducer (PerFieldPostingsFormat.java:391)
[5] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:118)
[6] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
[7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
[8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
[10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

+ +

The core of assembling the top-k results is mergeAux, a helper that merges the top-k hits:
Step completed: "thread=main", org.apache.lucene.search.TopDocs.mergeAux(), line=291 bci=43
291 for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {

main[1] where
[1] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:291)
[2] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
[3] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
[4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+ + + + + + +

From docid to stored content

The document is fetched by docid:
main[1] where
[1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:529)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:594)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
[4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
[5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
+

The seek method locates the document by offset; the curBuf used inside seek is a java.nio.DirectByteBufferR:
525    
526 @Override
527 public void seek(long pos) throws IOException {
528 try {
529 => curBuf.position((int) pos);
530 } catch (IllegalArgumentException e) {
531 if (pos < 0) {
532 throw new IllegalArgumentException("Seeking to negative position: " + this, e);
533 } else {
534 throw new EOFException("seek past EOF: " + this);
main[1] print curBuf
curBuf = "java.nio.DirectByteBufferR[pos=60 lim=154 cap=154]"

+ + +
main[1] list
168
169 // NOTE: AIOOBE not EOF if you read too much
170 @Override
171 public void readBytes(byte[] b, int offset, int len) {
172 => System.arraycopy(bytes, pos, b, offset, len);
173 pos += len;
174 }
175 }
main[1] where
[1] org.apache.lucene.store.ByteArrayDataInput.readBytes (ByteArrayDataInput.java:172)
[2] org.apache.lucene.store.DataInput.readString (DataInput.java:265)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
[4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
[5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
+ + +

Loading file data through off-heap memory
Breakpoint hit: "thread=main", org.apache.lucene.store.ByteBufferIndexInput.setCurBuf(), line=83 bci=0
83 this.curBuf = curBuf;

main[1] where
[1] org.apache.lucene.store.ByteBufferIndexInput.setCurBuf (ByteBufferIndexInput.java:83)
[2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.<init> (ByteBufferIndexInput.java:520)
[3] org.apache.lucene.store.ByteBufferIndexInput.newInstance (ByteBufferIndexInput.java:60)
[4] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:238)
[5] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
[6] org.apache.lucene.index.SegmentInfos.readCommit (SegmentInfos.java:297)
[7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:88)
[8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
[10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

+ + +

FileChannel's map

The Java side:
src\java.base\share\classes\sun\nio\ch\FileChannelImpl.java
// Creates a new mapping
private native long map0(int prot, long position, long length, boolean isSync)
throws IOException;
+ +

The native C implementation:
src\java.base\unix\native\libnio\ch\FileChannelImpl.c
JNIEXPORT jlong JNICALL
Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
jint prot, jlong off, jlong len, jboolean map_sync)
{
void *mapAddress = 0;
jobject fdo = (*env)->GetObjectField(env, this, chan_fd);
jint fd = fdval(env, fdo);
int protections = 0;
int flags = 0;

// should never be called with map_sync and prot == PRIVATE
assert((prot != sun_nio_ch_FileChannelImpl_MAP_PV) || !map_sync);

if (prot == sun_nio_ch_FileChannelImpl_MAP_RO) {
protections = PROT_READ;
flags = MAP_SHARED;
} else if (prot == sun_nio_ch_FileChannelImpl_MAP_RW) {
protections = PROT_WRITE | PROT_READ;
flags = MAP_SHARED;
} else if (prot == sun_nio_ch_FileChannelImpl_MAP_PV) {
protections = PROT_WRITE | PROT_READ;
flags = MAP_PRIVATE;
}

// if MAP_SYNC and MAP_SHARED_VALIDATE are not defined then it is
// best to define them here. This ensures the code compiles on old
// OS releases which do not provide the relevant headers. If run
// on the same machine then it will work if the kernel contains
// the necessary support otherwise mmap should fail with an
// invalid argument error

#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif

if (map_sync) {
// ensure
// 1) this is Linux on AArch64, x86_64, or PPC64 LE
// 2) the mmap APIs are available at compile time
#if !defined(LINUX) || ! (defined(aarch64) || (defined(amd64) && defined(_LP64)) || defined(ppc64le))
// TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
JNU_ThrowInternalError(env, "should never call map on platform where MAP_SYNC is unimplemented");
return IOS_THROWN;
#else
flags |= MAP_SYNC | MAP_SHARED_VALIDATE;
#endif
}

mapAddress = mmap64(
0, /* Let OS decide location */
len, /* Number of bytes to map */
protections, /* File permissions */
flags, /* Changes are shared */
fd, /* File descriptor of mapped file */
off); /* Offset into file */

if (mapAddress == MAP_FAILED) {
if (map_sync && errno == ENOTSUP) {
JNU_ThrowIOExceptionWithLastError(env, "map with mode MAP_SYNC unsupported");
return IOS_THROWN;
}

if (errno == ENOMEM) {
JNU_ThrowOutOfMemoryError(env, "Map failed");
return IOS_THROWN;
}
return handle(env, -1, "Map failed");
}

return ((jlong) (unsigned long) mapAddress);
}

+ +

Reading file contents through mmap

The mapping is done by a native method (FileChannelImpl.map0); on Linux this is mmap64:
main[1] list
228
229 /** Creates an IndexInput for the file with the given name. */
230 @Override
231 public IndexInput openInput(String name, IOContext context) throws IOException {
232 => ensureOpen();
233 ensureCanRead(name);
234 Path path = directory.resolve(name);
235 try (FileChannel c = FileChannel.open(path, StandardOpenOption.READ)) {
236 final String resourceDescription = "MMapIndexInput(path=\"" + path.toString() + "\")";
237 final boolean useUnmap = getUseUnmap();
main[1] print name
name = "_j.fnm"
main[1] where
[1] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:232)
[2] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
[3] org.apache.lucene.codecs.lucene90.Lucene90FieldInfosFormat.read (Lucene90FieldInfosFormat.java:124)
[4] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:111)
[5] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
[6] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
[7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[8] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
[9] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[11] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)
+ + + +
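
The same mapping can be reproduced in a few lines of plain Java, which is roughly what MMapDirectory does before wrapping the buffer in a ByteBufferIndexInput (the path and the position come from the debugging session above and are only illustrative):

import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapDemo {
    public static void main(String[] args) throws Exception {
        Path path = Path.of("/home/ubuntu/index/_j.fdt"); // placeholder path from the session above
        try (FileChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
            // map() is what eventually reaches the native map0 / mmap64 call shown above.
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            buf.position(60);       // seek, much as ByteBufferIndexInput.seek does
            byte first = buf.get(); // read a byte straight from the mapped region
            System.out.println(first);
        }
    }
}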

Reading the mmapped data

Where does the mmapped buffer actually get used? Much like ordinary file I/O: seek to an offset, then read bytes:
lucene\core\src\java\org\apache\lucene\store\ByteBufferIndexInput.java
@Override
public final void readBytes(byte[] b, int offset, int len) throws IOException {
try {
guard.getBytes(curBuf, b, offset, len);
} catch (
@SuppressWarnings("unused")
BufferUnderflowException e) {
int curAvail = curBuf.remaining();
while (len > curAvail) {
guard.getBytes(curBuf, b, offset, curAvail);
len -= curAvail;
offset += curAvail;
curBufIndex++;
if (curBufIndex >= buffers.length) {
throw new EOFException("read past EOF: " + this);
}
setCurBuf(buffers[curBufIndex]);
curBuf.position(0);
curAvail = curBuf.remaining();
}
guard.getBytes(curBuf, b, offset, len);
} catch (
@SuppressWarnings("unused")
NullPointerException npe) {
throw new AlreadyClosedException("Already closed: " + this);
}
}
+

Reading data after the mmap:
main[1] where
[1] jdk.internal.misc.Unsafe.copyMemory (Unsafe.java:782)
[2] java.nio.DirectByteBuffer.get (DirectByteBuffer.java:308)
[3] org.apache.lucene.store.ByteBufferGuard.getBytes (ByteBufferGuard.java:93)
[4] org.apache.lucene.store.ByteBufferIndexInput.readBytes (ByteBufferIndexInput.java:114)
[5] org.apache.lucene.store.BufferedChecksumIndexInput.readBytes (BufferedChecksumIndexInput.java:46)
[6] org.apache.lucene.store.DataInput.readString (DataInput.java:265)
[7] org.apache.lucene.codecs.CodecUtil.checkHeaderNoMagic (CodecUtil.java:202)
[8] org.apache.lucene.codecs.CodecUtil.checkHeader (CodecUtil.java:193)
[9] org.apache.lucene.codecs.CodecUtil.checkIndexHeader (CodecUtil.java:253)
[10] org.apache.lucene.codecs.lucene90.Lucene90FieldInfosFormat.read (Lucene90FieldInfosFormat.java:128)
[11] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:111)
[12] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
[13] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
[14] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[15] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
[16] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[17] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[18] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[19] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

+ +

File formats

The .fnm file

Format reference

+

The .fnm file is made up of:

  • Header
  • FieldsCount: the number of fields
  • an array of FieldsCount entries, each holding [FieldName, FieldNumber, FieldBits, DocValuesBits, DocValuesGen, DimensionCount, DimensionNumBytes]
  • Footer

.fnm describes each field's basic information; you can think of it as the field metadata.

+


Field names are stored in the field info file, with suffix .fnm.

FieldInfos (.fnm) --> Header,FieldsCount, <FieldName,FieldNumber, FieldBits,DocValuesBits,DocValuesGen,Attributes,DimensionCount,DimensionNumBytes> ,Footer

Data types:

Header --> IndexHeader
FieldsCount --> VInt
FieldName --> String
FieldBits, IndexOptions, DocValuesBits --> Byte
FieldNumber, DimensionCount, DimensionNumBytes --> VInt
Attributes --> Map<String,String>
DocValuesGen --> Int64
Footer --> CodecFooter
Field Descriptions:
FieldsCount: the number of fields in this file.
FieldName: name of the field as a UTF-8 String.
FieldNumber: the field's number. Note that unlike previous versions of Lucene, the fields are not numbered implicitly by their order in the file, instead explicitly.
FieldBits: a byte containing field options.
The low order bit (0x1) is one for fields that have term vectors stored, and zero for fields without term vectors.
If the second lowest order-bit is set (0x2), norms are omitted for the indexed field.
If the third lowest-order bit is set (0x4), payloads are stored for the indexed field.
IndexOptions: a byte containing index options.
0: not indexed
1: indexed as DOCS_ONLY
2: indexed as DOCS_AND_FREQS
3: indexed as DOCS_AND_FREQS_AND_POSITIONS
4: indexed as DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS
DocValuesBits: a byte containing per-document value types. The type recorded as two four-bit integers, with the high-order bits representing norms options, and the low-order bits representing DocValues options. Each four-bit integer can be decoded as such:
0: no DocValues for this field.
1: NumericDocValues. (DocValuesType.NUMERIC)
2: BinaryDocValues. (DocValuesType#BINARY)
3: SortedDocValues. (DocValuesType#SORTED)
DocValuesGen is the generation count of the field's DocValues. If this is -1, there are no DocValues updates to that field. Anything above zero means there are updates stored by DocValuesFormat.
Attributes: a key-value map of codec-private attributes.
PointDimensionCount, PointNumBytes: these are non-zero only if the field is indexed as points, e.g. using LongPoint
VectorDimension: it is non-zero if the field is indexed as vectors.
VectorSimilarityFunction: a byte containing distance function used for similarity calculation.
0: EUCLIDEAN distance. (VectorSimilarityFunction.EUCLIDEAN)
1: DOT_PRODUCT similarity. (VectorSimilarityFunction.DOT_PRODUCT)
2: COSINE similarity. (VectorSimilarityFunction.COSINE)
+ + +

.fdt

File path: lucene\backward-codecs\src\java\org\apache\lucene\backward_codecs\lucene50\Lucene50CompoundFormat.java

I could not find a format description for the Lucene 9.0 .fdt file, only the 2.9.4 one, so that description is used here as an approximation.

+
main[1] print fieldsStreamFN
fieldsStreamFN = "_j.fdt"
main[1] list
124 numDocs = si.maxDoc();
125
126 final String fieldsStreamFN =
127 IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION);
128 => ChecksumIndexInput metaIn = null;
129 try {
130 // Open the data file
131 fieldsStream = d.openInput(fieldsStreamFN, context);
132 version =
133 CodecUtil.checkIndexHeader(
main[1] where
[1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.<init> (Lucene90CompressingStoredFieldsReader.java:128)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat.fieldsReader (Lucene90CompressingStoredFieldsFormat.java:133)
[3] org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat.fieldsReader (Lucene90StoredFieldsFormat.java:136)
[4] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:138)
[5] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
[6] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
[7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[8] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
[9] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[11] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)
+ + +

Loading a doc's content into a Document object

The whole flow fetches the document's content by docid:

@Override
public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {

final SerializedDocument doc = document(docID); // fetch the doc by docID

for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) {
final long infoAndBits = doc.in.readVLong();
final int fieldNumber = (int) (infoAndBits >>> TYPE_BITS);
final FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);

final int bits = (int) (infoAndBits & TYPE_MASK);
assert bits <= NUMERIC_DOUBLE : "bits=" + Integer.toHexString(bits);

switch (visitor.needsField(fieldInfo)) {
case YES:
readField(doc.in, visitor, fieldInfo, bits); // read via the input (the fd it wraps), i.e. the mmap64-mapped file; this is where the .fdt file is read
break;
...
}
}
}
+ + +
main[1] where
[1] org.apache.lucene.document.Document.add (Document.java:60)
[2] org.apache.lucene.document.DocumentStoredFieldVisitor.stringField (DocumentStoredFieldVisitor.java:74)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
[4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
[5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+

Building a SerializedDocument from a docid

The entry point is here:

org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document

The document method of Lucene90CompressingStoredFieldsReader:

+
main[1] where
[1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:529)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:606)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
[4] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[5] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[6] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[7] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
+ +
SerializedDocument document(int docID) throws IOException {
if (state.contains(docID) == false) {
fieldsStream.seek(indexReader.getStartPointer(docID)); // seek by offset within the mmap64-mapped file
state.reset(docID);
}
assert state.contains(docID);
return state.document(docID); // see the concrete implementation below; state is an instance of a static inner class
}
+ +

Now the static inner class implementation:
  /**
* Get the serialized representation of the given docID. This docID has to be contained in the
* current block.
*/
SerializedDocument document(int docID) throws IOException {
if (contains(docID) == false) {
throw new IllegalArgumentException();
}

final int index = docID - docBase;
final int offset = Math.toIntExact(offsets[index]);
final int length = Math.toIntExact(offsets[index + 1]) - offset;
final int totalLength = Math.toIntExact(offsets[chunkDocs]);
final int numStoredFields = Math.toIntExact(this.numStoredFields[index]);

final BytesRef bytes;
if (merging) {
bytes = this.bytes;
} else {
bytes = new BytesRef();
}

final DataInput documentInput;
if (length == 0) {
...
} else {
fieldsStream.seek(startPointer); // seek to the mmap64 offset for this chunk
decompressor.decompress(fieldsStream, totalLength, offset, length, bytes); // decompress the chunk
assert bytes.length == length;
documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length); // wrap the decompressed bytes
}

return new SerializedDocument(documentInput, length, numStoredFields); // build the SerializedDocument
}
}
+ +

Here is the content-loading step in more detail:
 pos = 4
main[1] dump bytes
bytes = {
120, 116, 0, 26, 47, 104, 111, 109, 101, 47, 117, 98, 117, 110, 116, 117, 47, 100, 111, 99, 47, 104, 101, 108, 108, 111, 46, 116, 120, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
}
+ + +
main[1] print in
in = "MMapIndexInput(path="/home/ubuntu/index/_j.fdt")"
main[1] where
[1] org.apache.lucene.codecs.lucene90.LZ4WithPresetDictCompressionMode$LZ4WithPresetDictDecompressor.decompress (LZ4WithPresetDictCompressionMode.java:88)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:595)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
[4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
[5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+ + + +

Loading and processing the term dictionary files
public SegmentTermsEnum(FieldReader fr) throws IOException {
this.fr = fr;

// if (DEBUG) {
// System.out.println("BTTR.init seg=" + fr.parent.segment);
// }
stack = new SegmentTermsEnumFrame[0];

// Used to hold seek by TermState, or cached seek
staticFrame = new SegmentTermsEnumFrame(this, -1);

if (fr.index == null) {
fstReader = null;
} else {
fstReader = fr.index.getBytesReader();
}

// Init w/ root block; don't use index since it may
// not (and need not) have been loaded
for (int arcIdx = 0; arcIdx < arcs.length; arcIdx++) {
arcs[arcIdx] = new FST.Arc<>();
}

currentFrame = staticFrame;
final FST.Arc<BytesRef> arc;
if (fr.index != null) {
arc = fr.index.getFirstArc(arcs[0]);
// Empty string prefix must have an output in the index!
assert arc.isFinal();
} else {
arc = null;
}
// currentFrame = pushFrame(arc, rootCode, 0);
// currentFrame.loadBlock();
validIndexPrefix = 0;
// if (DEBUG) {
// System.out.println("init frame state " + currentFrame.ord);
// printSeekState();
// }

// System.out.println();
// computeBlockStats().print(System.out);
}
+ + +

Resolving an arc with getArc:
private FST.Arc<BytesRef> getArc(int ord) {
if (ord >= arcs.length) {
@SuppressWarnings({"rawtypes", "unchecked"})
final FST.Arc<BytesRef>[] next =
new FST.Arc[ArrayUtil.oversize(1 + ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
System.arraycopy(arcs, 0, next, 0, arcs.length);
for (int arcOrd = arcs.length; arcOrd < next.length; arcOrd++) {
next[arcOrd] = new FST.Arc<>();
}
arcs = next;
}
return arcs[ord];
}
+ +
Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.getArc(), line=222 bci=0
222 if (ord >= arcs.length) {

main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.getArc (SegmentTermsEnum.java:222)
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:511)
[3] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
[4] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

+

Collecting all matching documents
main[1] where
[1] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:300)
[2] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[3] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1] list
296 DocIdSetIterator iterator,
297 TwoPhaseIterator twoPhase,
298 Bits acceptDocs)
299 throws IOException {
300 => if (twoPhase == null) {
301 for (int doc = iterator.nextDoc();
302 doc != DocIdSetIterator.NO_MORE_DOCS;
303 doc = iterator.nextDoc()) {
304 if (acceptDocs == null || acceptDocs.get(doc)) {
305 collector.collect(doc);
main[1] print iterator
iterator = "org.apache.lucene.search.ImpactsDISI@6279cee3"

+ + +
main[1] list
494 @Override
495 public int advance(int target) throws IOException {
496 // current skip docID < docIDs generated from current buffer <= next skip docID
497 // we don't need to skip if target is buffered already
498 => if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
499
500 if (skipper == null) {
501 // Lazy init: first time this enum has ever been used for skipping
502 skipper =
503 new Lucene90SkipReader(
main[1] where
[1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance (Lucene90PostingsReader.java:498)
[2] org.apache.lucene.index.SlowImpactsEnum.advance (SlowImpactsEnum.java:77)
[3] org.apache.lucene.search.ImpactsDISI.advance (ImpactsDISI.java:135)
[4] org.apache.lucene.search.ImpactsDISI.nextDoc (ImpactsDISI.java:140)
[5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:301)
[6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
+ + +

The class behind the generated iterator is SegmentTermsEnum:
main[1] where
[1] org.apache.lucene.search.TermQuery$TermWeight.getTermsEnum (TermQuery.java:145)
[2] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
[3] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1] print termsEnum
termsEnum = "org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum@1a84f40f"
+ +

getTermsEnum returns the term's statistics and file offsets; SegmentTermsEnum itself does not contain the doc iterator:
main[1] where
[1] org.apache.lucene.index.Term.bytes (Term.java:128)
[2] org.apache.lucene.search.TermQuery$TermWeight.getTermsEnum (TermQuery.java:145)
[3] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
[4] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


144 final TermsEnum termsEnum = context.reader().terms(term.field()).iterator();
145 => termsEnum.seekExact(term.bytes(), state);
146 return termsEnum;
147 }
+

Here term.bytes() is the value we searched for, so this is where the term's postings start being read (I have not traced it all the way yet, so treat this as tentative).

Once the postings are read, scoring begins; the scorer exposes an iterator that walks all matching doc ids:
main[1] list
348 // (needsFreq=false)
349 private boolean isFreqsRead;
350 private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1
351
352 => public BlockDocsEnum(FieldInfo fieldInfo) throws IOException {
353 this.startDocIn = Lucene90PostingsReader.this.docIn;
354 this.docIn = null;
355 indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
356 indexHasPos =
357 fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
main[1] where
[1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.<init> (Lucene90PostingsReader.java:352)
[2] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.postings (Lucene90PostingsReader.java:258)
[3] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.impacts (Lucene90PostingsReader.java:280)
[4] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.impacts (SegmentTermsEnum.java:1,150)
[5] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:114)
[6] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The stack of the top-k collector:

Breakpoint hit: "thread=main", org.apache.lucene.search.TopDocsCollector.populateResults(), line=64 bci=0
64 for (int i = howMany - 1; i >= 0; i--) {

main[1] where
[1] org.apache.lucene.search.TopDocsCollector.populateResults (TopDocsCollector.java:64)
[2] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:166)
[3] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:98)
[4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:526)
[5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1]

The search process:

main[1] dump collector
collector = {
org.apache.lucene.search.TopScoreDocCollector.docBase: 0
org.apache.lucene.search.TopScoreDocCollector.pqTop: instance of org.apache.lucene.search.ScoreDoc(id=1529)
org.apache.lucene.search.TopScoreDocCollector.hitsThresholdChecker: instance of org.apache.lucene.search.HitsThresholdChecker$LocalHitsThresholdChecker(id=1530)
org.apache.lucene.search.TopScoreDocCollector.minScoreAcc: null
org.apache.lucene.search.TopScoreDocCollector.minCompetitiveScore: 0.0
org.apache.lucene.search.TopScoreDocCollector.$assertionsDisabled: true
org.apache.lucene.search.TopDocsCollector.EMPTY_TOPDOCS: instance of org.apache.lucene.search.TopDocs(id=1531)
org.apache.lucene.search.TopDocsCollector.pq: instance of org.apache.lucene.search.HitQueue(id=1532)
org.apache.lucene.search.TopDocsCollector.totalHits: 0
org.apache.lucene.search.TopDocsCollector.totalHitsRelation: instance of org.apache.lucene.search.TotalHits$Relation(id=1533)
}
main[1] print collector
collector = "org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector@62bd765"

How the hit count is obtained:

690      private <C extends Collector, T> T search(
691 Weight weight, CollectorManager<C, T> collectorManager, C firstCollector) throws IOException {
692 if (executor == null || leafSlices.length <= 1) {
693 search(leafContexts, weight, firstCollector);
694 => return collectorManager.reduce(Collections.singletonList(firstCollector));
695 } else {
696 final List<C> collectors = new ArrayList<>(leafSlices.length);
697 collectors.add(firstCollector);
698 final ScoreMode scoreMode = firstCollector.scoreMode();
699 for (int i = 1; i < leafSlices.length; ++i) {
main[1] where
[1] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[3] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[5] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[6] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Tracing org.apache.lucene.search.TopScoreDocCollector.create upward, we find that the collector already exists by the time org.apache.lucene.search.IndexSearcher.searchAfter runs.
So where is the hit count initialized?


Clearly, search fills in firstCollector's data; where exactly is it assigned?

protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
throws IOException {

// TODO: should we make this
// threaded...? the Collector could be sync'd?
// always use single thread:
for (LeafReaderContext ctx : leaves) { // search each subreader
final LeafCollector leafCollector;
try {
leafCollector = collector.getLeafCollector(ctx);
} catch (
@SuppressWarnings("unused")
CollectionTerminatedException e) {
// there is no doc of interest in this reader context
// continue with the following leaf
continue;
}
BulkScorer scorer = weight.bulkScorer(ctx); // the total hits are obtained via the scorer returned here
if (scorer != null) {
try {
scorer.score(leafCollector, ctx.reader().getLiveDocs());
} catch (
@SuppressWarnings("unused")
CollectionTerminatedException e) {
// collection was terminated prematurely
// continue with the following leaf
}
}
}
}

After reading the last stack we can confirm that totalHits is assigned here: each call increments it by one. It is clearly a counter, and what it counts are the matched documents. So where do those matches come from?
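As an aside, when only this counter is needed, the public API exposes it directly; a small sketch (the field and term are assumptions):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;

public class CountOnly {
  static int countAm(IndexSearcher searcher) throws java.io.IOException {
    // counts matching documents without keeping score-ordered top docs
    return searcher.count(new TermQuery(new Term("contents", "am")));
  }
}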

We can only keep tracing upward.
main[1] where
[1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
[2] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
[3] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[4] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

@Override
public void collect(int doc) throws IOException {
float score = scorer.score(); // compute the score; this is the pluggable score() callback

// This collector relies on the fact that scorers produce positive values:
assert score >= 0; // NOTE: false for NaN

totalHits++; // the hit count is incremented here
hitsThresholdChecker.incrementHitCount();

if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
updateGlobalMinCompetitiveScore(scorer);
}

if (score <= pqTop.score) {
if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
// we just reached totalHitsThreshold, we can start setting the min
// competitive score now
updateMinCompetitiveScore(scorer);
}
// Since docs are returned in-order (i.e., increasing doc Id), a document
// with equal score to pqTop.score cannot compete since HitQueue favors
// documents with lower doc Ids. Therefore reject those docs too.
return;
}
pqTop.doc = doc + docBase;
pqTop.score = score;
pqTop = pq.updateTop();
updateMinCompetitiveScore(scorer);
}
};

Pushing further up, we find the stack below: the scorer is created from the leaf context.

/**
* Optional method, to return a {@link BulkScorer} to score the query and send hits to a {@link
* Collector}. Only queries that have a different top-level approach need to override this; the
* default implementation pulls a normal {@link Scorer} and iterates and collects the resulting
* hits which are not marked as deleted.
*
* @param context the {@link org.apache.lucene.index.LeafReaderContext} for which to return the
* {@link Scorer}.
* @return a {@link BulkScorer} which scores documents and passes them to a collector.
* @throws IOException if there is a low-level I/O error
*/
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {

Scorer scorer = scorer(context);
if (scorer == null) {
// No docs match
return null;
}

// This impl always scores docs in order, so we can
// ignore scoreDocsInOrder:
return new DefaultBulkScorer(scorer);
}

Looking one level higher: bulkScorer calls back into a scorer method, and the implementation of that abstract method is org.apache.lucene.search.TermQuery$TermWeight.scorer.

This scorer method uses the incoming context together with the enclosing class's termQuery.term to produce the matches that are counted as hits.
main[1] list
103 assert termStates == null || termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context))
104 : "The top-reader used to create Weight is not the same as the current reader's top-reader ("
105 + ReaderUtil.getTopLevelContext(context);
106 ;
107 => final TermsEnum termsEnum = getTermsEnum(context);
108 if (termsEnum == null) {
109 return null;
110 }
111 LeafSimScorer scorer =
112 new LeafSimScorer(simScorer, context.reader(), term.field(), scoreMode.needsScores()); // term here belongs to the enclosing class, i.e. this$0.term
main[1] where
[1] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
[2] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

So advance is called later, and the call eventually lands in the advance method below, which uses docTermStartFP. Where is that variable initialized?

It actually comes from termStates; the initialization happens in the assignment docTermStartFP = termState.docStartFP.


lucene\core\src\java\org\apache\lucene\codecs\lucene90\Lucene90PostingsReader.java
@Override
public int advance(int target) throws IOException {
// current skip docID < docIDs generated from current buffer <= next skip docID
// we don't need to skip if target is buffered already
if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {

if (skipper == null) {
// Lazy init: first time this enum has ever been used for skipping
skipper =
new Lucene90SkipReader(
docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads);
}

if (!skipped) {
assert skipOffset != -1;
// This is the first time this enum has skipped
// since reset() was called; load the skip data:
skipper.init(docTermStartFP + skipOffset, docTermStartFP, 0, 0, docFreq);
skipped = true;
}

// always plus one to fix the result, since skip position in Lucene90SkipReader
// is a little different from MultiLevelSkipListReader
final int newDocUpto = skipper.skipTo(target) + 1;

if (newDocUpto >= blockUpto) {
// Skipper moved
assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto;
blockUpto = newDocUpto;

// Force to read next block
docBufferUpto = BLOCK_SIZE;
accum = skipper.getDoc(); // actually, this is just lastSkipEntry
docIn.seek(skipper.getDocPointer()); // now point to the block we want to search
// even if freqBuffer were not read from the previous block, we will mark them as read,
// as we don't need to skip the previous block freqBuffer in refillDocs,
// as we have already positioned docIn where in needs to be.
isFreqsRead = true;
}
// next time we call advance, this is used to
// foresee whether skipper is necessary.
nextSkipDoc = skipper.getNextSkipDoc();
}
if (docBufferUpto == BLOCK_SIZE) {
refillDocs();
}

// Now scan... this is an inlined/pared down version
// of nextDoc():
long doc;
while (true) {
doc = docBuffer[docBufferUpto];

if (doc >= target) {
break;
}
++docBufferUpto;
}

docBufferUpto++;
return this.doc = (int) doc;
}

@Override
public long cost() {
return docFreq;
}
}

Next, how is termStates itself initialized? My first guess was that term would be a member variable of termStates.

With breakpoints we finally land on the following:
main[1] list
178 }
179
180 @Override
181 public BlockTermState newTermState() {
182 => return new IntBlockTermState();
183 }
184
185 @Override
186 public void close() throws IOException {
187 IOUtils.close(docIn, posIn, payIn);
main[1] where
[1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.newTermState (Lucene90PostingsReader.java:182)
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.<init> (SegmentTermsEnumFrame.java:101)
[3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.<init> (SegmentTermsEnum.java:76)
[4] org.apache.lucene.codecs.lucene90.blocktree.FieldReader.iterator (FieldReader.java:153)
[5] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:116)
[6] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[7] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[8] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1]

This should finally be the very core of the term lookup flow (I hope so).

main[1] list
113
114 private static TermsEnum loadTermsEnum(LeafReaderContext ctx, Term term) throws IOException {
115 final Terms terms = Terms.getTerms(ctx.reader(), term.field());
116 final TermsEnum termsEnum = terms.iterator();
117 => if (termsEnum.seekExact(term.bytes())) {
118 return termsEnum;
119 }
120 return null;
121 }
122
main[1] print term.bytes()
term.bytes() = "[61 6d]"
main[1] where
[1] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
[2] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

At the very end it should call into the code below, which looks up the term and its counts; exactly where still needs to be confirmed, but the path should be this.

// Target's prefix matches this block's prefix; we
// scan the entries check if the suffix matches.
public SeekStatus scanToTermLeaf(BytesRef target, boolean exactOnly) throws IOException {

// if (DEBUG) System.out.println(" scanToTermLeaf: block fp=" + fp + " prefix=" + prefix + "
// nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" +
// brToString(term));

assert nextEnt != -1;

ste.termExists = true;
subCode = 0;

if (nextEnt == entCount) {
if (exactOnly) {
fillTerm();
}
return SeekStatus.END;
}

assert prefixMatches(target);

// TODO: binary search when all terms have the same length, which is common for ID fields,
// which are also the most sensitive to lookup performance?
// Loop over each entry (term or sub-block) in this block:
do {
nextEnt++;

suffix = suffixLengthsReader.readVInt();

// if (DEBUG) {
// BytesRef suffixBytesRef = new BytesRef();
// suffixBytesRef.bytes = suffixBytes;
// suffixBytesRef.offset = suffixesReader.getPosition();
// suffixBytesRef.length = suffix;
// System.out.println(" cycle: term " + (nextEnt-1) + " (of " + entCount + ") suffix="
// + brToString(suffixBytesRef));
// }

startBytePos = suffixesReader.getPosition();
suffixesReader.skipBytes(suffix);

// Loop over bytes in the suffix, comparing to the target
final int cmp =
Arrays.compareUnsigned(
suffixBytes,
startBytePos,
startBytePos + suffix,
target.bytes,
target.offset + prefix,
target.offset + target.length);

if (cmp < 0) {
// Current entry is still before the target;
// keep scanning
} else if (cmp > 0) {
// Done! Current entry is after target --
// return NOT_FOUND:
fillTerm();

// if (DEBUG) System.out.println(" not found");
return SeekStatus.NOT_FOUND;
} else {
// Exact match!

// This cannot be a sub-block because we
// would have followed the index to this
// sub-block from the start:

assert ste.termExists;
fillTerm();
// if (DEBUG) System.out.println(" found!");
return SeekStatus.FOUND;
}
} while (nextEnt < entCount);

// It is possible (and OK) that terms index pointed us
// at this block, but, we scanned the entire block and
// did not find the term to position to. This happens
// when the target is after the last term in the block
// (but, before the next term in the index). EG
// target could be foozzz, and terms index pointed us
// to the foo* block, but the last term in this block
// was fooz (and, eg, first term in the next block will
// bee fop).
// if (DEBUG) System.out.println(" block end");
if (exactOnly) {
fillTerm();
}

// TODO: not consistent that in the
// not-exact case we don't next() into the next
// frame here
return SeekStatus.END;
}

// Target's prefix matches this block's prefix; we
// scan the entries check if the suffix matches.
public SeekStatus scanToTermNonLeaf(BytesRef target, boolean exactOnly) throws IOException {

// if (DEBUG) System.out.println(" scanToTermNonLeaf: block fp=" + fp + " prefix=" + prefix +
// " nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" +
// brToString(target));

assert nextEnt != -1;

if (nextEnt == entCount) {
if (exactOnly) {
fillTerm();
ste.termExists = subCode == 0;
}
return SeekStatus.END;
}

assert prefixMatches(target);

// Loop over each entry (term or sub-block) in this block:
while (nextEnt < entCount) {

nextEnt++;

final int code = suffixLengthsReader.readVInt();
suffix = code >>> 1;

// if (DEBUG) {
// BytesRef suffixBytesRef = new BytesRef();
// suffixBytesRef.bytes = suffixBytes;
// suffixBytesRef.offset = suffixesReader.getPosition();
// suffixBytesRef.length = suffix;
// System.out.println(" cycle: " + ((code&1)==1 ? "sub-block" : "term") + " " +
// (nextEnt-1) + " (of " + entCount + ") suffix=" + brToString(suffixBytesRef));
// }

final int termLen = prefix + suffix;
startBytePos = suffixesReader.getPosition();
suffixesReader.skipBytes(suffix);
ste.termExists = (code & 1) == 0;
if (ste.termExists) {
state.termBlockOrd++;
subCode = 0;
} else {
subCode = suffixLengthsReader.readVLong();
lastSubFP = fp - subCode;
}

final int cmp =
Arrays.compareUnsigned(
suffixBytes,
startBytePos,
startBytePos + suffix,
target.bytes,
target.offset + prefix,
target.offset + target.length);

if (cmp < 0) {
// Current entry is still before the target;
// keep scanning
} else if (cmp > 0) {
// Done! Current entry is after target --
// return NOT_FOUND:
fillTerm();

// if (DEBUG) System.out.println(" maybe done exactOnly=" + exactOnly + "
// ste.termExists=" + ste.termExists);

if (!exactOnly && !ste.termExists) {
// System.out.println(" now pushFrame");
// TODO this
// We are on a sub-block, and caller wants
// us to position to the next term after
// the target, so we must recurse into the
// sub-frame(s):
ste.currentFrame = ste.pushFrame(null, ste.currentFrame.lastSubFP, termLen);
ste.currentFrame.loadBlock();
while (ste.currentFrame.next()) {
ste.currentFrame = ste.pushFrame(null, ste.currentFrame.lastSubFP, ste.term.length());
ste.currentFrame.loadBlock(); // the on-disk block is loaded from the input stream here
}
}

// if (DEBUG) System.out.println(" not found");
return SeekStatus.NOT_FOUND;
} else {
// Exact match!

// This cannot be a sub-block because we
// would have followed the index to this
// sub-block from the start:

assert ste.termExists;
fillTerm();
// if (DEBUG) System.out.println(" found!");
return SeekStatus.FOUND;
}
}

// It is possible (and OK) that terms index pointed us
// at this block, but, we scanned the entire block and
// did not find the term to position to. This happens
// when the target is after the last term in the block
// (but, before the next term in the index). EG
// target could be foozzz, and terms index pointed us
// to the foo* block, but the last term in this block
// was fooz (and, eg, first term in the next block will
// bee fop).
// if (DEBUG) System.out.println(" block end");
if (exactOnly) {
fillTerm();
}

// TODO: not consistent that in the
// not-exact case we don't next() into the next
// frame here
return SeekStatus.END;
}

How is termState deserialized?

Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.decodeTerm(), line=194 bci=0
194 final IntBlockTermState termState = (IntBlockTermState) _termState;

main[1] where
[1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.decodeTerm (Lucene90PostingsReader.java:194)
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData (SegmentTermsEnumFrame.java:476)
[3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.termState (SegmentTermsEnum.java:1,178)
[4] org.apache.lucene.index.TermStates.build (TermStates.java:104)
[5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


@Override
public void decodeTerm(
DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute)
throws IOException {
final IntBlockTermState termState = (IntBlockTermState) _termState;
final boolean fieldHasPositions =
fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
final boolean fieldHasOffsets =
fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS)
>= 0;
final boolean fieldHasPayloads = fieldInfo.hasPayloads();

if (absolute) {
termState.docStartFP = 0;
termState.posStartFP = 0;
termState.payStartFP = 0;
}

final long l = in.readVLong();
if ((l & 0x01) == 0) {
termState.docStartFP += l >>> 1;
if (termState.docFreq == 1) {
termState.singletonDocID = in.readVInt();
} else {
termState.singletonDocID = -1;
}
} else {
assert absolute == false;
assert termState.singletonDocID != -1;
termState.singletonDocID += BitUtil.zigZagDecode(l >>> 1);
}

if (fieldHasPositions) {
termState.posStartFP += in.readVLong();
if (fieldHasOffsets || fieldHasPayloads) {
termState.payStartFP += in.readVLong();
}
if (termState.totalTermFreq > BLOCK_SIZE) {
termState.lastPosBlockOffset = in.readVLong();
} else {
termState.lastPosBlockOffset = -1;
}
}

if (termState.docFreq > BLOCK_SIZE) {
termState.skipOffset = in.readVLong();
} else {
termState.skipOffset = -1;
}
}

In fact, ste holds a reference to the term:

main[2] dump ste.term.ref.bytes
ste.term.ref.bytes = {
97, 109, 0, 0, 0, 0, 0, 0
}
main[2] where
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData (SegmentTermsEnumFrame.java:476)
[3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.termState (SegmentTermsEnum.java:1,178)
[4] org.apache.lucene.index.TermStates.build (TermStates.java:104)
[5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

ste.in describes the file being read:

 ste.in = {
$assertionsDisabled: true
org.apache.lucene.store.ByteBufferIndexInput.EMPTY_FLOATBUFFER: instance of java.nio.HeapFloatBuffer(id=1473)
org.apache.lucene.store.ByteBufferIndexInput.EMPTY_LONGBUFFER: instance of java.nio.HeapLongBuffer(id=1474)
org.apache.lucene.store.ByteBufferIndexInput.EMPTY_INTBUFFER: instance of java.nio.HeapIntBuffer(id=1475)
org.apache.lucene.store.ByteBufferIndexInput.length: 1993
org.apache.lucene.store.ByteBufferIndexInput.chunkSizeMask: 1073741823
org.apache.lucene.store.ByteBufferIndexInput.chunkSizePower: 30
org.apache.lucene.store.ByteBufferIndexInput.guard: instance of org.apache.lucene.store.ByteBufferGuard(id=1476)
org.apache.lucene.store.ByteBufferIndexInput.buffers: instance of java.nio.ByteBuffer[1] (id=1477)
org.apache.lucene.store.ByteBufferIndexInput.curBufIndex: 0
org.apache.lucene.store.ByteBufferIndexInput.curBuf: instance of java.nio.DirectByteBufferR(id=1479)
org.apache.lucene.store.ByteBufferIndexInput.curLongBufferViews: null
org.apache.lucene.store.ByteBufferIndexInput.curIntBufferViews: null
org.apache.lucene.store.ByteBufferIndexInput.curFloatBufferViews: null
org.apache.lucene.store.ByteBufferIndexInput.isClone: true
org.apache.lucene.store.ByteBufferIndexInput.$assertionsDisabled: true
org.apache.lucene.store.IndexInput.resourceDescription: "MMapIndexInput(path="/home/dai/index/_7.cfs") [slice=_7_Lucene90_0.tim]"
}


Related code:

public void nextLeaf() {
// if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + "
// entCount=" + entCount);
assert nextEnt != -1 && nextEnt < entCount
: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
nextEnt++;
suffix = suffixLengthsReader.readVInt();
startBytePos = suffixesReader.getPosition();
ste.term.setLength(prefix + suffix);
ste.term.grow(ste.term.length());
suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
ste.termExists = true;
}

public boolean nextNonLeaf() throws IOException {
// if (DEBUG) System.out.println(" stef.next ord=" + ord + " nextEnt=" + nextEnt + " entCount="
// + entCount + " fp=" + suffixesReader.getPosition());
while (true) {
if (nextEnt == entCount) {
assert arc == null || (isFloor && isLastInFloor == false)
: "isFloor=" + isFloor + " isLastInFloor=" + isLastInFloor;
loadNextFloorBlock();
if (isLeafBlock) {
nextLeaf();
return false;
} else {
continue;
}
}

assert nextEnt != -1 && nextEnt < entCount
: "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
nextEnt++;
final int code = suffixLengthsReader.readVInt();
suffix = code >>> 1;
startBytePos = suffixesReader.getPosition();
ste.term.setLength(prefix + suffix);
ste.term.grow(ste.term.length());
suffixesReader.readBytes(ste.term.bytes(), prefix, suffix); // is this the most central spot?
if ((code & 1) == 0) {
// A normal term
ste.termExists = true;
subCode = 0;
state.termBlockOrd++;
return false;
} else {
// A sub-block; make sub-FP absolute:
ste.termExists = false;
subCode = suffixLengthsReader.readVLong();
lastSubFP = fp - subCode;
// if (DEBUG) {
// System.out.println(" lastSubFP=" + lastSubFP);
// }
return true;
}
}
}


This looks like where the term's location information inside the file is read:

main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.scanToTermLeaf (SegmentTermsEnumFrame.java:593)
[2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.scanToTerm (SegmentTermsEnumFrame.java:530)
[3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:538)
[4] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
[5] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[6] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[7] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[9] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[11] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1] dump suffixBytes
suffixBytes = {
97, 109, 97, 110, 100, 98, 117, 116, 99, 97, 110, 100, 111, 104, 101, 108, 108, 111, 104, 105, 105, 105, 115, 105, 116, 107, 110, 111, 119, 109, 97, 121, 109, 111, 110, 103, 111, 110, 111, 116, 116, 114, 121, 119, 104, 97, 116, 119, 111, 114, 108, 100, 121, 111, 117, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
}

Where the docFreq statistics for the term are read:

main[1] list
451 // postings
452
453 // TODO: if docFreq were bulk decoded we could
454 // just skipN here:
455 => if (statsSingletonRunLength > 0) {
456 state.docFreq = 1;
457 state.totalTermFreq = 1;
458 statsSingletonRunLength--;
459 } else {
460 int token = statsReader.readVInt();
main[1] print statsSingletonRunLength
statsSingletonRunLength = 0
main[1] next
>
Step completed: "thread=main", org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData(), line=460 bci=80
460 int token = statsReader.readVInt();

main[1] list
456 state.docFreq = 1;
457 state.totalTermFreq = 1;
458 statsSingletonRunLength--;
459 } else {
460 => int token = statsReader.readVInt();
461 if ((token & 1) == 1) {
462 state.docFreq = 1;
463 state.totalTermFreq = 1;
464 statsSingletonRunLength = token >>> 1;
465 } else {
main[1] print statsReader
statsReader = "org.apache.lucene.store.ByteArrayDataInput@6b67034"
main[1] dump statsReader
statsReader = {
bytes: instance of byte[64] (id=1520)
pos: 0
limit: 16
}
main[1] dump statsReader.bytes
statsReader.bytes = {
4, 0, 9, 2, 1, 4, 0, 3, 2, 1, 1, 2, 1, 7, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
}

The searched term "am" corresponds to the following bytes in the terms dictionary:

00000000  3f d7 6c 17 12 42 6c 6f  63 6b 54 72 65 65 54 65  |?.l..BlockTreeTe|
00000010 72 6d 73 44 69 63 74 00 00 00 00 fe ea 80 e6 45 |rmsDict........E|
00000020 20 d8 56 64 1b 1b 1b 89 70 fe 67 0a 4c 75 63 65 | .Vd....p.g.Luce|
00000030 6e 65 39 30 5f 30 25 bc 03 61 6d 61 6e 64 62 75 |ne90_0%..amandbu|
00000040 74 63 61 6e 64 6f 68 65 6c 6c 6f 68 69 69 69 73 |tcandohellohiiis|
00000050 69 74 6b 6e 6f 77 6d 61 79 6d 6f 6e 67 6f 6e 6f |itknowmaymongono|
00000060 74 74 72 79 77 68 61 74 77 6f 72 6c 64 79 6f 75 |ttrywhatworldyou|
00000070 24 02 03 03 03 02 05 02 01 02 02 04 03 05 03 03 |$...............|
00000080 04 05 03 10 04 00 09 02 01 04 00 03 02 01 01 02 |................| <---- the stats sequence starts at the fourth byte of this line
00000090 01 07 02 02 26 7a 3d 04 01 02 03 01 01 01 01 01 |....&z=.........|
000000a0 05 01 01 01 00 02 04 00 02 01 01 01 01 01 02 01 |................|
000000b0 01 01 02 01 01 01 01 05 01 03 01 05 a4 03 2f 68 |............../h|
000000c0 6f 6d 65 2f 75 62 75 6e 74 75 2f 64 6f 63 2f 68 |ome/ubuntu/doc/h|
000000d0 65 6c 6c 6f 2e 74 78 74 2f 68 6f 6d 65 2f 75 62 |ello.txt/home/ub|
000000e0 75 6e 74 75 2f 64 6f 63 2f 6d 6f 6e 67 6f 2e 74 |untu/doc/mongo.t|
000000f0 78 74 05 1a 01 03 04 82 01 01 03 c0 28 93 e8 00 |xt..........(...|
00000100 00 00 00 00 00 00 00 da 02 a3 a3 |...........|

So where is docFreq assigned?

 currentFrame.state.docFreq = 2
main[1] list
1,113 assert !eof;
1,114 // if (DEBUG) System.out.println("BTR.docFreq");
1,115 currentFrame.decodeMetaData();
1,116 // if (DEBUG) System.out.println(" return " + currentFrame.state.docFreq);
1,117 => return currentFrame.state.docFreq;
1,118 }
1,119
1,120 @Override
1,121 public long totalTermFreq() throws IOException {
1,122 assert !eof;
main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.docFreq (SegmentTermsEnum.java:1,117)
[2] org.apache.lucene.index.TermStates.build (TermStates.java:107)
[3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
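The same statistics are also available through the public reader API; a sketch (field and term assumed):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

public class TermStats {
  static void print(IndexReader reader) throws java.io.IOException {
    Term t = new Term("contents", "am");
    System.out.println("docFreq = " + reader.docFreq(t));             // number of documents containing the term
    System.out.println("totalTermFreq = " + reader.totalTermFreq(t)); // total occurrences across those documents
  }
}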

The read path:

readByte:110, ByteBufferIndexInput (org.apache.lucene.store)
readVInt:121, DataInput (org.apache.lucene.store)
readVIntBlock:149, Lucene90PostingsReader (org.apache.lucene.codecs.lucene90)
refillDocs:472, Lucene90PostingsReader$BlockDocsEnum (org.apache.lucene.codecs.lucene90)
advance:538, Lucene90PostingsReader$BlockDocsEnum (org.apache.lucene.codecs.lucene90)
advance:77, SlowImpactsEnum (org.apache.lucene.index)
advance:128, ImpactsDISI (org.apache.lucene.search)
nextDoc:133, ImpactsDISI (org.apache.lucene.search)
scoreAll:301, Weight$DefaultBulkScorer (org.apache.lucene.search)
score:247, Weight$DefaultBulkScorer (org.apache.lucene.search)
score:38, BulkScorer (org.apache.lucene.search)
search:776, IndexSearcher (org.apache.lucene.search)
search:694, IndexSearcher (org.apache.lucene.search)
search:688, IndexSearcher (org.apache.lucene.search)
searchAfter:523, IndexSearcher (org.apache.lucene.search)
search:538, IndexSearcher (org.apache.lucene.search)
doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)
queryTest:52, QueryTest (com.dinosaur.lucene.demo)


Where is the .tim file read and initialized?

void loadBlock() throws IOException {

// Clone the IndexInput lazily, so that consumers
// that just pull a TermsEnum to
// seekExact(TermState) don't pay this cost:
ste.initIndexInput();

if (nextEnt != -1) {
// Already loaded
return;
}
// System.out.println("blc=" + blockLoadCount);

ste.in.seek(fp);
int code = ste.in.readVInt();
entCount = code >>> 1;
assert entCount > 0;
isLastInFloor = (code & 1) != 0;

assert arc == null || (isLastInFloor || isFloor)
: "fp=" + fp + " arc=" + arc + " isFloor=" + isFloor + " isLastInFloor=" + isLastInFloor;

// TODO: if suffixes were stored in random-access
// array structure, then we could do binary search
// instead of linear scan to find target term; eg
// we could have simple array of offsets

final long startSuffixFP = ste.in.getFilePointer();
// term suffixes:
final long codeL = ste.in.readVLong();
isLeafBlock = (codeL & 0x04) != 0;
final int numSuffixBytes = (int) (codeL >>> 3);
if (suffixBytes.length < numSuffixBytes) {
suffixBytes = new byte[ArrayUtil.oversize(numSuffixBytes, 1)];
}
try {
compressionAlg = CompressionAlgorithm.byCode((int) codeL & 0x03);
} catch (IllegalArgumentException e) {
throw new CorruptIndexException(e.getMessage(), ste.in, e);
}
compressionAlg.read(ste.in, suffixBytes, numSuffixBytes);
suffixesReader.reset(suffixBytes, 0, numSuffixBytes);

int numSuffixLengthBytes = ste.in.readVInt();
final boolean allEqual = (numSuffixLengthBytes & 0x01) != 0;
numSuffixLengthBytes >>>= 1;
if (suffixLengthBytes.length < numSuffixLengthBytes) {
suffixLengthBytes = new byte[ArrayUtil.oversize(numSuffixLengthBytes, 1)];
}
if (allEqual) {
Arrays.fill(suffixLengthBytes, 0, numSuffixLengthBytes, ste.in.readByte());
} else {
ste.in.readBytes(suffixLengthBytes, 0, numSuffixLengthBytes);
}
suffixLengthsReader.reset(suffixLengthBytes, 0, numSuffixLengthBytes);
totalSuffixBytes = ste.in.getFilePointer() - startSuffixFP;

/*if (DEBUG) {
if (arc == null) {
System.out.println(" loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
} else {
System.out.println(" loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
}
}*/

// stats
int numBytes = ste.in.readVInt();
if (statBytes.length < numBytes) {
statBytes = new byte[ArrayUtil.oversize(numBytes, 1)];
}
ste.in.readBytes(statBytes, 0, numBytes);
statsReader.reset(statBytes, 0, numBytes);
statsSingletonRunLength = 0;
metaDataUpto = 0;

state.termBlockOrd = 0;
nextEnt = 0;
lastSubFP = -1;

// TODO: we could skip this if !hasTerms; but
// that's rare so won't help much
// metadata
numBytes = ste.in.readVInt();
if (bytes.length < numBytes) {
bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
}
ste.in.readBytes(bytes, 0, numBytes);
bytesReader.reset(bytes, 0, numBytes);

// Sub-blocks of a single floor block are always
// written one after another -- tail recurse:
fpEnd = ste.in.getFilePointer();
// if (DEBUG) {
// System.out.println(" fpEnd=" + fpEnd);
// }
}
As we know, Lucene splits an index into multiple files; here we only discuss the inverted-index part.

Lucene calls the file used to index terms the Terms Index, with the suffix .tip. The postings information is stored separately in .doc, .pay and .pos files: .doc records the postings' doc ids and term frequencies, .pay records payload data, and .pos records position information. The Terms Dictionary uses the suffix .tim; it is the link between a term and its postings, storing each term together with file pointers into the postings files.

Overall, the Terms Index (.tip) lets you quickly locate the term you want inside the Terms Dictionary (.tim), along with its postings file pointers and the term's segment-level statistics.

Postings: postings actually contain more than just DocIDs (the ordered sequence of document numbers is what we usually call DocIDs); they also include term frequencies, the term's positions within each document, and payload data.

So the inverted index involves at least five kinds of files; this post does not cover them all.
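A quick way to see these files for yourself is to list the segment directory; a minimal sketch (the path is an assumption, and a compound .cfs file may hide the individual extensions, as in the traces above):

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListIndexFiles {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/home/dai/index"))) {
      for (String name : dir.listAll()) {
        System.out.println(name); // e.g. _7.cfs, or _7_Lucene90_0.tim/.tip/.doc/.pos/.pay when not compound
      }
    }
  }
}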

Related reading

RabbitMQ heartbeat issues and PHP

Why do we need heartbeats?

What does TCP itself rely on to keep a connection alive?

Sequence numbers and retransmission. But those are transport-layer mechanisms; the application layer cannot tell that the peer has gone away, which is why an application-level heartbeat is needed.

What is the problem with heartbeats in PHP?

PHP mostly uses a single-process model, so there is no extra thread that periodically sends a heartbeat frame on the TCP connection. As a result, once the code runs for long enough (about twice the heartbeat interval), the RabbitMQ peer closes the connection.

So in most scenarios we need to keep our processing time below the heartbeat interval, otherwise we run into "broken pipe" errors; in practice that error usually means the heartbeat deadline was exceeded and RabbitMQ actively closed the TCP connection.
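To illustrate the negotiated heartbeat, here is a sketch with the RabbitMQ Java client (used only because the sketches in this blog are Java; PHP clients expose an equivalent option, and the host and interval below are assumptions):

import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;

public class HeartbeatDemo {
  public static void main(String[] args) throws Exception {
    ConnectionFactory factory = new ConnectionFactory();
    factory.setHost("localhost");
    factory.setRequestedHeartbeat(30);   // ask the broker for a 30-second heartbeat
    try (Connection conn = factory.newConnection()) {
      // The Java client sends heartbeat frames from a background thread, which is exactly
      // what a busy single-process PHP worker lacks while it runs user code.
      System.out.println("negotiated heartbeat = " + conn.getHeartbeat() + "s");
    }
  }
}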

RabbitMQ flow
rabbitmq-server/deps/rabbit/src/rabbit_msg_store.erl

%% Message store is responsible for storing messages
%% on disk and loading them back. The store handles both
%% persistent messages and transient ones (when a node
%% is under RAM pressure and needs to page messages out
%% to disk). The store is responsible for locating messages
%% on disk and maintaining an index.
The message store is responsible for storing messages on disk and loading them back; it handles both persistent and transient messages (when memory use rises past a threshold, messages are paged out to disk).
The store locates messages on disk and maintains the index that maps each message to its on-disk location.

%% There are two message stores per node: one for transient
%% and one for persistent messages.
There are two message stores per node:
one for transient messages and one for persistent messages.
%%
%% Queue processes interact with the stores via clients.
Queue processes interact with the store via clients.
%% The components:
%%
%% Index: this is a mapping from MsgId to #msg_location{}.
%% By default, it's in ETS, but other implementations can
%% be used.
%% FileSummary: this maps File to #file_summary{} and is stored
%% in ETS.
Two components: the Index (MsgId -> #msg_location{}, ETS by default) and the FileSummary (File -> #file_summary{}, stored in ETS).


%% The basic idea is that messages are appended to the current file up
%% until that file becomes too big (> file_size_limit). At that point,
%% the file is closed and a new file is created on the _right_ of the
%% old file which is used for new messages. Files are named
%% numerically ascending, thus the file with the lowest name is the
%% eldest file.
The basic idea is that messages are appended to the current file until it becomes too big (> file_size_limit). At that point the file is closed and a new
file is created to the right of the old one for new messages. Files are named in ascending numeric order,
so the file with the smallest number is the oldest.
%% We need to keep track of which messages are in which files (this is
%% the index); how much useful data is in each file and which files
%% are on the left and right of each other. This is the purpose of the
%% file summary ETS table.
We need to track which file each message lives in (that is the job of the index), how much useful data remains in each
file, and which files sit to the left and right of each other. That is the purpose of the file summary ETS table.
%% As messages are removed from files, holes appear in these
%% files. The field ValidTotalSize contains the total amount of useful
%% data left in the file. This is needed for garbage collection.
As messages are removed from files, holes appear in them. The ValidTotalSize field holds the total amount of useful data
left in a file; this is what garbage collection needs.
%% When we discover that a file is now empty, we delete it. When we
%% discover that it can be combined with the useful data in either its
%% left or right neighbour, and overall, across all the files, we have
%% ((the amount of garbage) / (the sum of all file sizes)) >
%% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
%% which will compact the two files together.
When we discover that a file is empty, we delete it. When a file can be combined with the useful data in its left or right neighbour,
and overall (the amount of garbage) / (the sum of all file sizes)
exceeds ?GARBAGE_FRACTION, a concurrent garbage-collection run is started, compacting the two files together.

%% This keeps disk
%% utilisation high and aids performance. We deliberately do this
%% lazily in order to prevent doing GC on files which are soon to be
%% emptied (and hence deleted).

%% Given the compaction between two files, the left file (i.e. elder
%% file) is considered the ultimate destination for the good data in
%% the right file. If necessary, the good data in the left file which
%% is fragmented throughout the file is written out to a temporary
%% file, then read back in to form a contiguous chunk of good data at
%% the start of the left file. Thus the left file is garbage collected
%% and compacted. Then the good data from the right file is copied
%% onto the end of the left file. Index and file summary tables are
%% updated.
%%
%% On non-clean startup, we scan the files we discover, dealing with
%% the possibilities of a crash having occurred during a compaction
%% (this consists of tidyup - the compaction is deliberately designed
%% such that data is duplicated on disk rather than risking it being
%% lost), and rebuild the file summary and index ETS table.
%%
%% So, with this design, messages move to the left. Eventually, they
%% should end up in a contiguous block on the left and are then never
%% rewritten. But this isn't quite the case. If in a file there is one
%% message that is being ignored, for some reason, and messages in the
%% file to the right and in the current block are being read all the
%% time then it will repeatedly be the case that the good data from
%% both files can be combined and will be written out to a new
%% file. Whenever this happens, our shunned message will be rewritten.
%%
%% So, provided that we combine messages in the right order,
%% (i.e. left file, bottom to top, right file, bottom to top),
%% eventually our shunned message will end up at the bottom of the
%% left file. The compaction/combining algorithm is smart enough to
%% read in good data from the left file that is scattered throughout
%% (i.e. C and D in the below diagram), then truncate the file to just
%% above B (i.e. truncate to the limit of the good contiguous region
%% at the start of the file), then write C and D on top and then write
%% E, F and G from the right file on top. Thus contiguous blocks of
%% good data at the bottom of files are not rewritten.
%%
%% +-------+ +-------+ +-------+
%% | X | | G | | G |
%% +-------+ +-------+ +-------+
%% | D | | X | | F |
%% +-------+ +-------+ +-------+
%% | X | | X | | E |
%% +-------+ +-------+ +-------+
%% | C | | F | ===> | D |
%% +-------+ +-------+ +-------+
%% | X | | X | | C |
%% +-------+ +-------+ +-------+
%% | B | | X | | B |
%% +-------+ +-------+ +-------+
%% | A | | E | | A |
%% +-------+ +-------+ +-------+
%% left right left
%%
%% From this reasoning, we do have a bound on the number of times the
%% message is rewritten. From when it is inserted, there can be no
%% files inserted between it and the head of the queue, and the worst
%% case is that every time it is rewritten, it moves one position lower
%% in the file (for it to stay at the same position requires that
%% there are no holes beneath it, which means truncate would be used
%% and so it would not be rewritten at all). Thus this seems to
%% suggest the limit is the number of messages ahead of it in the
%% queue, though it's likely that that's pessimistic, given the
%% requirements for compaction/combination of files.
%%
%% The other property that we have is the bound on the lowest
%% utilisation, which should be 50% - worst case is that all files are
%% fractionally over half full and can't be combined (equivalent is
%% alternating full files and files with only one tiny message in
%% them).
%%
%% Messages are reference-counted. When a message with the same msg id
%% is written several times we only store it once, and only remove it
%% from the store when it has been removed the same number of times.
%%
%% The reference counts do not persist. Therefore the initialisation
%% function must be provided with a generator that produces ref count
%% deltas for all recovered messages. This is only used on startup
%% when the shutdown was non-clean.
%%
%% Read messages with a reference count greater than one are entered
%% into a message cache. The purpose of the cache is not especially
%% performance, though it can help there too, but prevention of memory
%% explosion. It ensures that as messages with a high reference count
%% are read from several processes they are read back as the same
%% binary object rather than multiples of identical binary
%% objects.
%%
%% Reads can be performed directly by clients without calling to the
%% server. This is safe because multiple file handles can be used to
%% read files. However, locking is used by the concurrent GC to make
%% sure that reads are not attempted from files which are in the
%% process of being garbage collected.
%%
%% When a message is removed, its reference count is decremented. Even
%% if the reference count becomes 0, its entry is not removed. This is
%% because in the event of the same message being sent to several
%% different queues, there is the possibility of one queue writing and
%% removing the message before other queues write it at all. Thus
%% accommodating 0-reference counts allows us to avoid unnecessary
%% writes here. Of course, there are complications: the file to which
%% the message has already been written could be locked pending
%% deletion or GC, which means we have to rewrite the message as the
%% original copy will now be lost.
%%
%% The server automatically defers reads, removes and contains calls
%% that occur which refer to files which are currently being
%% GC'd. Contains calls are only deferred in order to ensure they do
%% not overtake removes.
%%
%% The current file to which messages are being written has a
%% write-back cache. This is written to immediately by clients and can
%% be read from by clients too. This means that there are only ever
%% writes made to the current file, thus eliminating delays due to
%% flushing write buffers in order to be able to safely read from the
%% current file. The one exception to this is that on start up, the
%% cache is not populated with msgs found in the current file, and
%% thus in this case only, reads may have to come from the file
%% itself. The effect of this is that even if the msg_store process is
%% heavily overloaded, clients can still write and read messages with
%% very low latency and not block at all.
%%
%% Clients of the msg_store are required to register before using the
%% msg_store. This provides them with the necessary client-side state
%% to allow them to directly access the various caches and files. When
%% they terminate, they should deregister. They can do this by calling
%% either client_terminate/1 or client_delete_and_terminate/1. The
%% differences are: (a) client_terminate is synchronous. As a result,
%% if the msg_store is badly overloaded and has lots of in-flight
%% writes and removes to process, this will take some time to
%% return. However, once it does return, you can be sure that all the
%% actions you've issued to the msg_store have been processed. (b) Not
%% only is client_delete_and_terminate/1 asynchronous, but it also
%% permits writes and subsequent removes from the current
%% (terminating) client which are still in flight to be safely
%% ignored. Thus from the point of view of the msg_store itself, and
%% all from the same client:
%%
%% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
%% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
%%
%% The client obviously sent T after all the other messages (up to
%% W4), but because the msg_store prioritises messages, the T can be
%% promoted and thus received early.
%%
%% Thus at the point of the msg_store receiving T, we have messages 1
%% and 2 with a refcount of 1. After T, W3 will be ignored because
%% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
%% ignored because the messages that they refer to were already known
%% to the msg_store prior to T. However, it can be a little more
%% complex: after the first R2, the refcount of msg 2 is 0. At that
%% point, if a GC occurs or file deletion, msg 2 could vanish, which
%% would then mean that the subsequent W2 and R2 are then ignored.
%%
%% The use case then for client_delete_and_terminate/1 is if the
%% client wishes to remove everything it's written to the msg_store:
%% it issues removes for all messages it's written and not removed,
%% and then calls client_delete_and_terminate/1. At that point, any
%% in-flight writes (and subsequent removes) can be ignored, but
%% removes and writes for messages the msg_store already knows about
%% will continue to be processed normally (which will normally just
%% involve modifying the reference count, which is fast). Thus we save
%% disk bandwidth for writes which are going to be immediately removed
%% again by the the terminating client.
%%
%% We use a separate set to keep track of the dying clients in order
%% to keep that set, which is inspected on every write and remove, as
%% small as possible. Inspecting the set of all clients would degrade
%% performance with many healthy clients and few, if any, dying
%% clients, which is the typical case.
%%
%% Client termination messages are stored in a separate ets index to
%% avoid filling primary message store index and message files with
%% client termination messages.
%%
%% When the msg_store has a backlog (i.e. it has unprocessed messages
%% in its mailbox / gen_server priority queue), a further optimisation
%% opportunity arises: we can eliminate pairs of 'write' and 'remove'
%% from the same client for the same message. A typical occurrence of
%% these is when an empty durable queue delivers persistent messages
%% to ack'ing consumers. The queue will asynchronously ask the
%% msg_store to 'write' such messages, and when they are acknowledged
%% it will issue a 'remove'. That 'remove' may be issued before the
%% msg_store has processed the 'write'. There is then no point going
%% ahead with the processing of that 'write'.
%%
%% To detect this situation a 'flying_ets' table is shared between the
%% clients and the server. The table is keyed on the combination of
%% client (reference) and msg id, and the value represents an
%% integration of all the writes and removes currently "in flight" for
%% that message between the client and server - '+1' means all the
%% writes/removes add up to a single 'write', '-1' to a 'remove', and
%% '0' to nothing. (NB: the integration can never add up to more than
%% one 'write' or 'read' since clients must not write/remove a message
%% more than once without first removing/writing it).
%%
%% Maintaining this table poses two challenges: 1) both the clients
%% and the server access and update the table, which causes
%% concurrency issues, 2) we must ensure that entries do not stay in
%% the table forever, since that would constitute a memory leak. We
%% address the former by carefully modelling all operations as
%% sequences of atomic actions that produce valid results in all
%% possible interleavings. We address the latter by deleting table
%% entries whenever the server finds a 0-valued entry during the
%% processing of a write/remove. 0 is essentially equivalent to "no
%% entry". If, OTOH, the value is non-zero we know there is at least
%% one other 'write' or 'remove' in flight, so we get an opportunity
%% later to delete the table entry when processing these.
%%
%% There are two further complications. We need to ensure that 1)
%% eliminated writes still get confirmed, and 2) the write-back cache
%% doesn't grow unbounded. These are quite straightforward to
%% address. See the comments in the code.
%%
%% For notes on Clean Shutdown and startup, see documentation in
%% rabbit_variable_queue.
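A toy model of the append-and-roll scheme plus the garbage-fraction trigger described above (invented names in Java, not RabbitMQ code):

import java.util.ArrayDeque;
import java.util.Deque;

class SegmentedStore {
  static final long FILE_SIZE_LIMIT = 16 * 1024 * 1024;   // assumed limit
  static final double GARBAGE_FRACTION = 0.5;             // assumed threshold

  static final class SegmentFile {
    long totalSize;        // bytes ever written to this file
    long validTotalSize;   // bytes still referenced; shrinks as messages are removed
  }

  final Deque<SegmentFile> files = new ArrayDeque<>();     // oldest file on the left

  void append(long msgBytes) {
    SegmentFile current = files.peekLast();
    if (current == null || current.totalSize + msgBytes > FILE_SIZE_LIMIT) {
      current = new SegmentFile();                         // roll to a new file "on the right"
      files.addLast(current);
    }
    current.totalSize += msgBytes;
    current.validTotalSize += msgBytes;
  }

  boolean needsGc() {
    long total = files.stream().mapToLong(f -> f.totalSize).sum();
    long valid = files.stream().mapToLong(f -> f.validTotalSize).sum();
    return total > 0 && (double) (total - valid) / total > GARBAGE_FRACTION;
  }
}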
Leases

A lease is a contract that gives its holder specified rights over property for a limited period of time. In the context of caching, a lease grants to its holder control over writes to the covered datum during the term of the lease, such that the server must obtain the approval of the leaseholder before the datum may be written. When a leaseholder grants approval for a write, it invalidates its local copy of the datum.
In other words: a lease is a contract under which the lease holder has particular rights for a limited period of time. In the caching scenario, the lease gives its holder control over writes during the lease term, so the server must obtain the holder's approval before the covered datum may be written.
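A tiny illustrative sketch of the rule described above (invented types, not from the paper): the server may write only when no lease is active, when the writer itself holds the lease, or when the holder has approved the write.

#include <chrono>
#include <optional>
#include <string>

struct Lease {
    std::string holder;
    std::chrono::steady_clock::time_point expires_at;
};

// Decides whether the server may apply a write to the covered datum.
bool may_write(const std::optional<Lease>& lease,
               const std::string& writer,
               bool holder_approved) {
    using clock = std::chrono::steady_clock;
    if (!lease || clock::now() >= lease->expires_at) return true;  // no active lease
    if (lease->holder == writer) return true;                      // holder writes its own datum
    return holder_approved;                                        // otherwise approval is required
}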

+

Related reading

  • Leases: An Efficient Fault-Tolerant Mechanism for Distributed File Cache Consistency
diff --git a/2021/12/30/roaring-bitmap/index.html b/2021/12/30/roaring-bitmap/index.html
new file mode 100644
index 0000000000..1037e265ac
--- /dev/null
+++ b/2021/12/30/roaring-bitmap/index.html
@@ -0,0 +1,482 @@

+ roaring bitmap +

Past a certain number of elements, a bitmap takes less memory than an array; roaring bitmaps exploit this property to compress the storage of integer sets.

+

Background

Given an array of numbers, there are several ways to represent them.

+
array = [1 , 2 , 3 ,5,7]     (1)
+

Option 1: use the array directly

Assume every number is a UInt32, i.e. a 4-byte integer.

+

Then example (1) above takes 5 * 4 = 20 bytes. The more data we store, the more memory we need: memory usage is linear in the number of values.

+
memory = array.size() * 4
+

Advantages:

  • It can hold as many values as there is memory for.

Disadvantages:

  • Memory usage grows linearly with the number of values.

Option 2: use a 4-byte bitmap directly

+
uint32_t num = 0;
num |= 1u << 1;  // mark value 1
num |= 1u << 2;  // mark value 2
num |= 1u << 3;  // mark value 3
num |= 1u << 5;  // mark value 5
num |= 1u << 7;  // mark value 7

+

How many numbers can it represent?
4 * 8 = 32, so it can mark the presence of 32 distinct values (0–31).

+

Advantages:

  • Memory usage is O(1); it does not grow as more values are added.

Disadvantages:

  • A 4-byte bitmap can only describe values drawn from a universe of 32 integers.
+

roaring bitmap


+

Going further, suppose we need to handle values from the full 2^32 range.

  • Using only a bitmap: we would need 2^32 bits / 8 = 2^29 bytes = 512 MiB.

  • Using only an array: the memory needed is 4 * array.length bytes.

The difference between bitmap and array: a bitmap occupies a fixed amount of memory, while an array's memory grows with its length. In the example above (storing 4-byte numbers) the bitmap always occupies 512 MiB, whereas the array grows dynamically. When the set is small an array is better; when it is large a bitmap is better.

+

The break-even point: an array of n values, each number_length bytes, costs number_length * n bytes, while a bitmap over the whole value range costs a fixed 2^(number_length * 8) / 8 bytes. Here number_length is the byte length of one number: 4 for uint32, 8 for uint64.

+

Roaring bitmaps therefore do not bitmap the full 32-bit range. Each 32-bit value is split into a high 16-bit part (the container key) and a low 16-bit part stored inside the container. Within a container the values are 16-bit, so an array costs 2 bytes per value while a bitmap costs a fixed 2^16 / 8 = 8192 bytes; solving 2 * n = 8192 gives n = 4096.

+

So each container is effectively a union {bitmap, array}: while it holds fewer than 4096 values it is stored as a sorted array, and once it reaches 4096 values it is converted to a bitmap.

+
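A minimal sketch of such a hybrid container, assuming the 4096-element threshold derived above (illustrative only; real implementations such as CRoaring also have run containers and many optimisations):

#include <algorithm>
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

// One roaring-style container: covers the low 16 bits of a value.
struct Container {
    static constexpr std::size_t kThreshold = 4096;     // 2 bytes * 4096 == 8192 bytes (bitmap size)
    std::vector<uint16_t> array;                         // sorted, used while the set is sparse
    std::unique_ptr<std::bitset<1 << 16>> bitmap;        // used once the set is dense

    void add(uint16_t v) {
        if (bitmap) { bitmap->set(v); return; }
        auto it = std::lower_bound(array.begin(), array.end(), v);
        if (it != array.end() && *it == v) return;       // already present
        array.insert(it, v);
        if (array.size() > kThreshold) {                 // convert array -> bitmap
            bitmap = std::make_unique<std::bitset<1 << 16>>();
            for (uint16_t x : array) bitmap->set(x);
            array.clear();
            array.shrink_to_fit();
        }
    }
    bool contains(uint16_t v) const {
        if (bitmap) return bitmap->test(v);
        return std::binary_search(array.begin(), array.end(), v);
    }
};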

Operations on roaring bitmap containers

See the relevant code:

+
CRoaring/include/roaring/containers/containers.h
+ +

Related reading:

diff --git a/2022/01/04/croaring-bitmap/index.html b/2022/01/04/croaring-bitmap/index.html
new file mode 100644
index 0000000000..6fda126694
--- /dev/null
+++ b/2022/01/04/croaring-bitmap/index.html
@@ -0,0 +1,442 @@

+ croaring bitmap +

1. Installation

git clone https://github.com/RoaringBitmap/CRoaring.git
cd CRoaring/
mkdir -p build && cd build
cmake -DCMAKE_BUILD_TYPE=Debug ..
make -j4
+

That's it; the library is built.
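A quick smoke test using CRoaring's C API (the header path and link flags may differ depending on how you installed it; compile with something like g++ test.cpp -lroaring):

#include <cstdint>
#include <cstdio>
#include <roaring/roaring.h>

int main() {
    roaring_bitmap_t* r = roaring_bitmap_create();
    for (uint32_t v : {1u, 2u, 3u, 5u, 7u, 1000000u})
        roaring_bitmap_add(r, v);                      // values go into array/bitmap containers
    std::printf("contains 5: %d\n", (int)roaring_bitmap_contains(r, 5));
    std::printf("cardinality: %llu\n",
                (unsigned long long)roaring_bitmap_get_cardinality(r));
    roaring_bitmap_free(r);
    return 0;
}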

diff --git "a/2022/02/18/jdk-\347\274\226\350\257\221/index.html" "b/2022/02/18/jdk-\347\274\226\350\257\221/index.html"
new file mode 100644
index 0000000000..41a6e4e8c3
--- /dev/null
+++ "b/2022/02/18/jdk-\347\274\226\350\257\221/index.html"
@@ -0,0 +1,459 @@

+ jdk 编译 (building the JDK) +

Preface

The JDK has been self-hosting since a certain version, so you need an existing (boot) JDK before you can build it.
This mainly follows https://openjdk.java.net/groups/build/doc/building.html

+

Download a JDK
Bytecode

src\hotspot\share\interpreter\bytecodes.cpp

+

BytecodeInterpreter::run

+
(gdb) bt
#0 TemplateTable::if_icmp (cc=4294967295) at /home/ubuntu/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2381
#1 0x00007ffff70a9519 in Template::generate (this=0x7ffff7d60be0 <TemplateTable::_template_table+5088>, masm=0x7ffff0019760) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateTable.cpp:63
#2 0x00007ffff709bde8 in TemplateInterpreterGenerator::generate_and_dispatch (this=0x7ffff5c5da40, t=0x7ffff7d60be0 <TemplateTable::_template_table+5088>, tos_out=vtos)
at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:392
#3 0x00007ffff709b934 in TemplateInterpreterGenerator::set_short_entry_points (this=0x7ffff5c5da40, t=0x7ffff7d60be0 <TemplateTable::_template_table+5088>,
bep=@0x7ffff5c5d398: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, cep=@0x7ffff5c5d3a0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
sep=@0x7ffff5c5d3a8: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, aep=@0x7ffff5c5d3b0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
iep=@0x7ffff5c5d3b8: 0x7fffe1028807 "PSQRH\213M\330H\205\311\017\204", <incomplete sequence \312>, lep=@0x7ffff5c5d3c0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
fep=@0x7ffff5c5d3c8: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, dep=@0x7ffff5c5d3d0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
vep=@0x7ffff5c5d3d8: 0x7fffe1028800 "\213\004$H\203\304\bPSQRH\213M\330H\205\311\017\204", <incomplete sequence \312>) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:356
#4 0x00007ffff709b46c in TemplateInterpreterGenerator::set_entry_points (this=0x7ffff5c5da40, code=Bytecodes::_if_icmpeq) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:325
#5 0x00007ffff709b06d in TemplateInterpreterGenerator::set_entry_points_for_all_bytes (this=0x7ffff5c5da40) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:281
#6 0x00007ffff709ac13 in TemplateInterpreterGenerator::generate_all (this=0x7ffff5c5da40) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:234
#7 0x00007ffff70993cb in TemplateInterpreterGenerator::TemplateInterpreterGenerator (this=0x7ffff5c5da40, _code=0x7ffff00a28b0) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:42
#8 0x00007ffff7097e5b in TemplateInterpreter::initialize () at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreter.cpp:56
#9 0x00007ffff69e06a2 in interpreter_init () at /home/ubuntu/jdk/src/hotspot/share/interpreter/interpreter.cpp:116
#10 0x00007ffff69bbe73 in init_globals () at /home/ubuntu/jdk/src/hotspot/share/runtime/init.cpp:119
#11 0x00007ffff70d59b7 in Threads::create_vm (args=0x7ffff5c5de20, canTryAgain=0x7ffff5c5dd2b) at /home/ubuntu/jdk/src/hotspot/share/runtime/thread.cpp:3728
#12 0x00007ffff6adcd5d in JNI_CreateJavaVM_inner (vm=0x7ffff5c5de78, penv=0x7ffff5c5de80, args=0x7ffff5c5de20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:3945
#13 0x00007ffff6add0b1 in JNI_CreateJavaVM (vm=0x7ffff5c5de78, penv=0x7ffff5c5de80, args=0x7ffff5c5de20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:4036
#14 0x00007ffff7fba88c in InitializeJVM (pvm=0x7ffff5c5de78, penv=0x7ffff5c5de80, ifn=0x7ffff5c5ded0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:1527
#15 0x00007ffff7fb7447 in JavaMain (_args=0x7fffffffb040) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:414
#16 0x00007ffff7d7a609 in start_thread (arg=<optimized out>) at pthread_create.c:477
#17 0x00007ffff7ed8293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
+ +

Class loading

Thread 2 "java" hit Breakpoint 3, SystemDictionary::load_instance_class (class_name=0x7fffcc2d90f0, class_loader=..., __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1386
1386 InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
(gdb) bt
#0 SystemDictionary::load_instance_class (class_name=0x7fffcc2d90f0, class_loader=..., __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1386
#1 0x00007ffff61bf8ab in SystemDictionary::resolve_instance_class_or_null (name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:854
#2 0x00007ffff61bdcf8 in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:272
#3 0x00007ffff61bdb5e in SystemDictionary::resolve_or_null (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:255
#4 0x00007ffff61bd7d1 in SystemDictionary::resolve_or_fail (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:203
#5 0x00007ffff61bdae8 in SystemDictionary::resolve_or_fail (class_name=0x7fffcc2d90f0, throw_error=true, __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:245
#6 0x00007ffff61c3490 in SystemDictionary::resolve_wk_klass (id=SystemDictionary::Object_klass_knum, __the_thread__=0x7ffff001b800)
at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1938
#7 0x00007ffff61c35c5 in SystemDictionary::resolve_wk_klasses_until (limit_id=SystemDictionary::Cloneable_klass_knum, start_id=@0x7ffff7fbc974: SystemDictionary::Object_klass_knum,
__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1948
#8 0x00007ffff5c5c46d in SystemDictionary::resolve_wk_klasses_through (end_id=SystemDictionary::Class_klass_knum, start_id=@0x7ffff7fbc974: SystemDictionary::Object_klass_knum,
__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.hpp:391
#9 0x00007ffff61c37a3 in SystemDictionary::resolve_well_known_classes (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1991
#10 0x00007ffff61c32d8 in SystemDictionary::initialize (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1898
#11 0x00007ffff623b65c in Universe::genesis (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/memory/universe.cpp:329
#12 0x00007ffff623dd21 in universe2_init () at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/memory/universe.cpp:953
#13 0x00007ffff5a575e7 in init_globals () at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/runtime/init.cpp:125
#14 0x00007ffff620ecbe in Threads::create_vm (args=0x7ffff7fbce20, canTryAgain=0x7ffff7fbcd2b) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/runtime/thread.cpp:3731
#15 0x00007ffff5b6b598 in JNI_CreateJavaVM_inner (vm=0x7ffff7fbce78, penv=0x7ffff7fbce80, args=0x7ffff7fbce20) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/prims/jni.cpp:3935
#16 0x00007ffff5b6b8c2 in JNI_CreateJavaVM (vm=0x7ffff7fbce78, penv=0x7ffff7fbce80, args=0x7ffff7fbce20) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/prims/jni.cpp:4021
#17 0x00007ffff7bc6601 in InitializeJVM (pvm=0x7ffff7fbce78, penv=0x7ffff7fbce80, ifn=0x7ffff7fbced0) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/java.base/share/native/libjli/java.c:1529
#18 0x00007ffff7bc320c in JavaMain (_args=0x7fffffffaa30) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/java.base/share/native/libjli/java.c:414
#19 0x00007ffff71956db in start_thread (arg=0x7ffff7fbd700) at pthread_create.c:463
#20 0x00007ffff78ef61f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95


(gdb) p class_name->_body@10
$14 = {"ja", "va", "/l", "an", "g/", "Ob", "je", "ct", "\361", <incomplete sequence \361>, "\377\377"}

+ + +

Parsing the class file stream

ClassFileParser::parse_stream
+ +

find_transitive_override

Method overriding

+
find_transitive_override
update_inherited_vtable
+ + +

Signature

Method::name_and_sig_as_C_string
+ +

Related reading

diff --git a/2022/03/27/lr-parser/index.html b/2022/03/27/lr-parser/index.html
new file mode 100644
index 0000000000..15dbdc8909
--- /dev/null
+++ b/2022/03/27/lr-parser/index.html
@@ -0,0 +1,597 @@

+ lr parser +

Introduction to grammars

A grammar consists of four parts:

+

T : the terminal symbols

+

N : the non-terminal symbols

+

P : the productions

+

S : the start symbol (the production we start from)

+

lr(0)

right : terminals and non-terminals
left : a non-terminal
production : a left part and a right part
configuration : a production with a dot (marker) in its right-hand side
successor : a set of configurations

+

Building the configuration sets

The problem is how to build the configuration sets:

+

A configuration set is the combination of two sets: the basis set and the closure set.

+
  • basis set
  • closure set
 The basis set consists of all configurations in S having a marker before an s, but with the
marker moved to follow the s;
+ +

closure set : { A -> .w | A -> w is a production }

+
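A small illustrative sketch of configurations and the closure computation (upper-case letters are non-terminals by convention; this is not tied to any particular parser generator):

#include <cctype>
#include <set>
#include <string>
#include <tuple>
#include <vector>

struct Production { char lhs; std::string rhs; };

// A configuration ("item") is a production plus a dot position in its right-hand side.
struct Item {
    int prod;
    std::size_t dot;
    bool operator<(const Item& o) const { return std::tie(prod, dot) < std::tie(o.prod, o.dot); }
};

// closure(): repeatedly add A -> .w for every non-terminal A that appears right after a dot.
std::set<Item> closure(std::set<Item> items, const std::vector<Production>& g) {
    bool changed = true;
    while (changed) {
        changed = false;
        for (const Item& it : std::set<Item>(items)) {       // iterate over a snapshot
            const std::string& rhs = g[it.prod].rhs;
            if (it.dot >= rhs.size()) continue;              // dot at the end: nothing to add
            char next = rhs[it.dot];
            if (!std::isupper(static_cast<unsigned char>(next))) continue;  // terminal: skip
            for (std::size_t p = 0; p < g.size(); ++p)
                if (g[p].lhs == next)
                    changed |= items.insert({static_cast<int>(p), 0}).second;
        }
    }
    return items;
}

The basis set is obtained by taking the configurations of a state whose dot precedes some symbol s and moving the dot past s; the closure above then completes the new state.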

lr(0)

What is special about LR(0) is its state machine:

+

  • reduce state : its outgoing transitions amount to at most (one completed configuration + zero or one non-terminal)
  • read state : all of its transitions are on terminal symbols
+

The LR(0) algorithm

A few things to note about the LR(0) description:

+

  • stack : the stack holds two kinds of entries: states, and grammar symbols (the union of terminals and non-terminals)
+

Conflicts

Shift-reduce conflicts

+

  • Cause: there are productions P1 and P2 such that the right-hand side of P1 is a prefix of the right-hand side of P2.

+

Reduce-reduce conflicts:

+

  • Cause: there are productions P1 and P2 whose right-hand sides share a common suffix.
+

How to resolve the conflicts

Resolving shift-reduce conflicts:
the conflict is resolvable when FOLLOW(P), for the production P being reduced, does not intersect the set of terminals that can be shifted.

+

Resolving reduce-reduce conflicts:
the conflict is resolvable when the FOLLOW sets of the candidate left-hand sides do not intersect.

+

To resolve such conflicts we introduce SLR(1), which fixes LR(0)'s inability to handle them.

+

SLR(1)

Related reading

diff --git "a/2022/03/30/\350\214\203\345\236\213\346\243\200\346\237\245/index.html" "b/2022/03/30/\350\214\203\345\236\213\346\243\200\346\237\245/index.html"
new file mode 100644
index 0000000000..2d5934ae2f
--- /dev/null
+++ "b/2022/03/30/\350\214\203\345\236\213\346\243\200\346\237\245/index.html"
@@ -0,0 +1,442 @@

+ 范型检查 (generics / cast checking) +
A ClassCastException is thrown if a cast is found at run time to be impermissible.
Some casts result in an error at compile time. Some casts can be proven, at compile time,
always to be correct at run time. For example, it is always correct to convert a value of a
class type to the type of its superclass; such a cast should require no special action at run
time. Finally, some casts cannot be proven to be either always correct or always i
+ +

Related reading:

diff --git a/2022/04/02/dubbo-rpc/index.html b/2022/04/02/dubbo-rpc/index.html
new file mode 100644
index 0000000000..a0ce0013c0
--- /dev/null
+++ b/2022/04/02/dubbo-rpc/index.html
@@ -0,0 +1,454 @@

+ dubbo-rpc +
doInvoke:92, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
runLeaf:366, ParentRunner (org.junit.runners)
runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:128, Suite (org.junit.runners)
runChild:27, Suite (org.junit.runners)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
+ + +

Network I/O

+
send:157, NettyChannel (org.apache.dubbo.remoting.transport.netty4)
send:181, AbstractClient (org.apache.dubbo.remoting.transport)
send:53, AbstractPeer (org.apache.dubbo.remoting.transport)
request:137, HeaderExchangeChannel (org.apache.dubbo.remoting.exchange.support.header)
request:95, HeaderExchangeClient (org.apache.dubbo.remoting.exchange.support.header)
request:93, ReferenceCountExchangeClient (org.apache.dubbo.rpc.protocol.dubbo)
doInvoke:108, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
runLeaf:366, ParentRunner (org.junit.runners)
runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
+ + +
final WriteTask task = WriteTask.newInstance(next, m, promise, flush);
+ + + +

Call stack

+
run:1067, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
safeExecute$$$capture:164, AbstractEventExecutor (io.netty.util.concurrent)
safeExecute:-1, AbstractEventExecutor (io.netty.util.concurrent)
- Async stack trace
addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:828, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:818, SingleThreadEventExecutor (io.netty.util.concurrent)
safeExecute:989, AbstractChannelHandlerContext (io.netty.channel)
write:796, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:758, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:808, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
writeAndFlush:306, AbstractChannel (io.netty.channel)
send:162, NettyChannel (org.apache.dubbo.remoting.transport.netty4)
send:181, AbstractClient (org.apache.dubbo.remoting.transport)
send:53, AbstractPeer (org.apache.dubbo.remoting.transport)
request:137, HeaderExchangeChannel (org.apache.dubbo.remoting.exchange.support.header)
request:95, HeaderExchangeClient (org.apache.dubbo.remoting.exchange.support.header)
request:93, ReferenceCountExchangeClient (org.apache.dubbo.rpc.protocol.dubbo)
doInvoke:108, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
invoke0:-2, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
runLeaf:366, ParentRunner (org.junit.runners)
runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
+ + +

Related reading

diff --git "a/2022/04/12/java\345\222\214springboot/index.html" "b/2022/04/12/java\345\222\214springboot/index.html"
new file mode 100644
index 0000000000..1a137674fb
--- /dev/null
+++ "b/2022/04/12/java\345\222\214springboot/index.html"
@@ -0,0 +1,442 @@

+ java和springboot (Java and Spring Boot) +

diff --git "a/2022/04/12/java\347\261\273\345\210\235\345\247\213\345\214\226/index.html" "b/2022/04/12/java\347\261\273\345\210\235\345\247\213\345\214\226/index.html"
new file mode 100644
index 0000000000..2ca0b6541f
--- /dev/null
+++ "b/2022/04/12/java\347\261\273\345\210\235\345\247\213\345\214\226/index.html"
@@ -0,0 +1,449 @@

+ java类初始化 (Java class initialization) +
Method* InstanceKlass::class_initializer() const {
Method* clinit = find_method(
vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
if (clinit != NULL && clinit->has_valid_initializer_flags()) {
return clinit;
}
return NULL;
}
+ +

Finding the method:

+
src\hotspot\share\oops\instanceKlass.cpp

int InstanceKlass::find_method_index(const Array<Method*>* methods,
const Symbol* name,
const Symbol* signature,
OverpassLookupMode overpass_mode,
StaticLookupMode static_mode,
PrivateLookupMode private_mode) {
const bool skipping_overpass = (overpass_mode == OverpassLookupMode::skip);
const bool skipping_static = (static_mode == StaticLookupMode::skip);
const bool skipping_private = (private_mode == PrivateLookupMode::skip);
const int hit = quick_search(methods, name);
if (hit != -1) {
const Method* const m = methods->at(hit);

// Do linear search to find matching signature. First, quick check
// for common case, ignoring overpasses if requested.
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
return hit;
}

// search downwards through overloaded methods
int i;
for (i = hit - 1; i >= 0; --i) {
const Method* const m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) {
break;
}
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
return i;
}
}
// search upwards
for (i = hit + 1; i < methods->length(); ++i) {
const Method* const m = methods->at(i);
assert(m->is_method(), "must be method");
if (m->name() != name) {
break;
}
if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
return i;
}
}
// not found
#ifdef ASSERT
const int index = (skipping_overpass || skipping_static || skipping_private) ? -1 :
linear_search(methods, name, signature);
assert(-1 == index, "binary search should have found entry %d", index);
#endif
}
return -1;
}
+ +
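The lookup above first binary-searches the name (quick_search), then scans downwards and upwards through the adjacent same-named entries to match the signature. A small generic sketch of that pattern (illustrative only, not HotSpot code):

#include <string>
#include <vector>

struct MethodRef { std::string name, signature; };

// Methods are kept sorted by name, so all overloads of a name are adjacent.
int find_method(const std::vector<MethodRef>& methods,
                const std::string& name, const std::string& sig) {
    // binary search: land on any index whose name matches
    int lo = 0, hi = (int)methods.size() - 1, hit = -1;
    while (lo <= hi) {
        int mid = (lo + hi) / 2;
        int c = methods[mid].name.compare(name);
        if (c == 0) { hit = mid; break; }
        if (c < 0) lo = mid + 1; else hi = mid - 1;
    }
    if (hit == -1) return -1;
    // scan outwards through the block of same-named methods for the right signature
    for (int i = hit; i >= 0 && methods[i].name == name; --i)
        if (methods[i].signature == sig) return i;
    for (int i = hit + 1; i < (int)methods.size() && methods[i].name == name; ++i)
        if (methods[i].signature == sig) return i;
    return -1;
}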

Related reading

diff --git "a/2022/04/12/jvm\347\272\277\347\250\213\345\256\236\347\216\260/index.html" "b/2022/04/12/jvm\347\272\277\347\250\213\345\256\236\347\216\260/index.html"
new file mode 100644
index 0000000000..18d8916965
--- /dev/null
+++ "b/2022/04/12/jvm\347\272\277\347\250\213\345\256\236\347\216\260/index.html"
@@ -0,0 +1,452 @@

+ jvm线程实现 (JVM thread implementation) +

Background

Get a picture of the thread-related handling logic.

+

jni

JNI is similar to a PHP extension: both bind a symbol to a native function.

+

Call stack

The stack:

+
(gdb) bt
#0 __pthread_create_2_1 (newthread=0x7ffff5aa5458, attr=0x7ffff5aa54a0, start_routine=0x7ffff6c0d1aa <thread_native_entry(Thread*)>, arg=0x7ffff02e7810) at pthread_create.c:625
#1 0x00007ffff6c0d813 in os::create_thread (thread=0x7ffff02e7810, thr_type=os::java_thread, req_stack_size=0) at /home/ubuntu/daixiao/jdk/src/hotspot/os/linux/os_linux.cpp:867
#2 0x00007ffff6ee1eb7 in JavaThread::JavaThread (this=0x7ffff02e7810, entry_point=0x7ffff6837419 <thread_entry(JavaThread*, JavaThread*)>, stack_sz=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/thread.cpp:1195
#3 0x00007ffff68375e4 in JVM_StartThread (env=0x7ffff0028c38, jthread=0x7ffff5aa5760) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:2890
#4 0x00007fffe100f68b in ?? ()
#5 0x00007ffff5aa56e0 in ?? ()
#6 0x00007ffff5aa56f8 in ?? ()
#7 0x0000000000000000 in ?? ()
+ + + +
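The backtrace shows JVM_StartThread constructing a JavaThread whose OS thread os::create_thread starts via pthread_create, with thread_native_entry as the entry point. A minimal stand-alone illustration of that pattern (plain pthreads, not HotSpot code):

#include <cstdio>
#include <pthread.h>

struct Thread { const char* name; };

// analogous to thread_native_entry(Thread*): the OS-level entry that runs the VM thread
static void* thread_native_entry(void* arg) {
    Thread* t = static_cast<Thread*>(arg);
    std::printf("running %s\n", t->name);
    return nullptr;
}

int main() {
    Thread t{"java-thread"};
    pthread_t tid;
    pthread_create(&tid, /*attr=*/nullptr, thread_native_entry, &t);  // like os::create_thread
    pthread_join(tid, nullptr);
    return 0;
}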

Related reading

diff --git a/2022/04/19/bloom-filter/index.html b/2022/04/19/bloom-filter/index.html
new file mode 100644
index 0000000000..bff185c42a
--- /dev/null
+++ b/2022/04/19/bloom-filter/index.html
@@ -0,0 +1,443 @@

+ bloom-filter +

diff --git a/2022/05/13/clickhosue-insert-insert-deduplicate/index.html b/2022/05/13/clickhosue-insert-insert-deduplicate/index.html
new file mode 100644
index 0000000000..1242510dfb
--- /dev/null
+++ b/2022/05/13/clickhosue-insert-insert-deduplicate/index.html
@@ -0,0 +1,477 @@

+ clickhouse insert insert_deduplicate +

+ + +
+ + + + +
+ + +

Reproducing the issue

  • The CREATE TABLE statement is as follows:
+
CREATE TABLE test ON CLUSTER `{cluster}`
(
`timestamp` DateTime,
`contractid` UInt32,
`userid` UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/default/test', '{replica}')
PARTITION BY toYYYYMM(timestamp)
ORDER BY (contractid, toDate(timestamp), userid)
SAMPLE BY userid

+
  • First insert:
+
insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

+
  • The result is a single row:
    SELECT *  FROM test;

    ┌───────────timestamp─┬─contractid─┬─userid─┐
    │ 2022-02-02 00:00:00 │ 1 │ 1 │
    └─────────────────────┴────────────┴────────┘

  • +
+

Second insert

+
insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

+

It still returns just one row:

+
:) insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

INSERT INTO test (userid, contractid, timestamp) FORMAT Values

Query id: 706e2447-95eb-4515-a7b7-cf363512b673

Ok.

1 row in set. Elapsed: 0.056 sec.

dai-MS-7B89 :) select * from test

SELECT *
FROM test

Query id: 3ba7cd7f-4621-4286-8646-79737ec3e763

┌───────────timestamp─┬─contractid─┬─userid─┐
│ 2022-02-02 00:00:00 │ 1 │ 1 │
└─────────────────────┴────────────┴────────┘

1 row in set. Elapsed: 0.030 sec.

+ + +

When the same data is inserted twice, ClickHouse deduplicates it, so the two inserts result in only one stored row.

+
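Conceptually the deduplication hashes each inserted block together with its partition into a block ID (the "202202_816..._124..." ID visible in the logs later in this post) and drops a block whose ID has been seen recently. A rough sketch of the idea, assuming a plain in-memory set (the real implementation keeps recent block IDs in ZooKeeper, not locally):

#include <functional>
#include <string>
#include <unordered_set>

// Returns true if the block should be written, false if it is treated as a duplicate.
bool should_write(std::unordered_set<std::string>& recent_block_ids,
                  const std::string& partition_id, const std::string& block_data) {
    // block ID = partition + hash of the block's data
    std::string block_id =
        partition_id + "_" + std::to_string(std::hash<std::string>{}(block_data));
    return recent_block_ids.insert(block_id).second;  // false if already present -> skip the block
}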

How to work around it

ClickHouse provides a setting, insert_deduplicate, that controls whether deduplication is applied.

+
set insert_deduplicate=0;
+ +

After that, re-inserting the same row is no longer dropped by deduplication.

+
insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

INSERT INTO test (userid, contractid, timestamp) FORMAT Values

Query id: a8df989b-0b63-4b45-a1b8-22c13b18bf0a

Ok.

1 row in set. Elapsed: 0.070 sec.

dai-MS-7B89 :) select * from test

SELECT *
FROM test

Query id: e077b55e-bfd9-4678-ae46-9fc05714b3f7

┌───────────timestamp─┬─contractid─┬─userid─┐
│ 2022-02-02 00:00:00 │ 1 │ 1 │
└─────────────────────┴────────────┴────────┘
┌───────────timestamp─┬─contractid─┬─userid─┐
│ 2022-02-02 00:00:00 │ 1 │ 1 │
└─────────────────────┴────────────┴────────┘
+ + +

Log and source analysis

Log analysis

2022.05.15 23:32:04.515912 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Trace> MergedBlockOutputStream: filled checksums 202202_2_2_0 (state Temporary)
2022.05.15 23:32:04.517872 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Debug> default.test (7d656761-7cd0-4866-a43e-f0e4cea97654) (Replicated OutputStream): Wrote block with ID '202202_8166901380224458449_12408515745921908624', 1 rows
2022.05.15 23:32:04.533981 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Information> default.test (7d656761-7cd0-4866-a43e-f0e4cea97654) (Replicated OutputStream): Block with ID 202202_8166901380224458449_12408515745921908624 already exists locally as part 202202_0_0_0; ignoring it.


+ +

Debugging ClickHouse with lldb

+
lldb ./clickhouse-server
+ +
void ReplicatedMergeTreeSink::consume(Chunk chunk)
{
auto block = getHeader().cloneWithColumns(chunk.detachColumns());

String block_id;

if (deduplicate) // comes from the deduplicate decision made above
{
String block_dedup_token; // build the dedup token

/// We add the hash from the data and partition identifier to deduplication ID.
/// That is, do not insert the same data to the same partition twice.

const String & dedup_token = settings.insert_deduplication_token;
if (!dedup_token.empty())
{
/// multiple blocks can be inserted within the same insert query
/// an ordinal number is added to dedup token to generate a distinctive block id for each block
block_dedup_token = fmt::format("{}_{}", dedup_token, chunk_dedup_seqnum);
++chunk_dedup_seqnum;
}

block_id = temp_part.part->getZeroLevelPartBlockID(block_dedup_token);
LOG_DEBUG(log, "Wrote block with ID '{}', {} rows", block_id, current_block.block.rows());
}
else
{
LOG_DEBUG(log, "Wrote block with {} rows", current_block.block.rows());
}
+ +

Related reading

diff --git "a/2022/05/15/clickhouse-\345\273\272\350\241\250/index.html" "b/2022/05/15/clickhouse-\345\273\272\350\241\250/index.html"
new file mode 100644
index 0000000000..f5656fac2b
--- /dev/null
+++ "b/2022/05/15/clickhouse-\345\273\272\350\241\250/index.html"
@@ -0,0 +1,459 @@

+ clickhouse 建表 (creating tables) +

Background

I am currently using ClickHouse and wanted to set up an instance myself and successfully create a table on it.

+

zookeeper

  • Step 1: download ZooKeeper

  • Step 2: start ZooKeeper:

    #### switch to the bin directory
    cd apache-zookeeper-3.8.0-bin/bin/
    ## start zk
    ./zkServer.sh

  • Step 3: create the zk node; the path is /path/to/zookeeper/node

    +
### start zkCli
cd apache-zookeeper-3.8.0-bin/bin/
### start the client
./zkCli.sh
### create the zk nodes, one level at a time
### command to create a node: create path
    [zk: localhost:2181(CONNECTED) 11] create /path
    Created /path
    [zk: localhost:2181(CONNECTED) 12] create /path/to
    Created /path/to
    [zk: localhost:2181(CONNECTED) 13] create /path/to/zookeeper
    Created /path/to/zookeeper
    [zk: localhost:2181(CONNECTED) 14] create /path/to/zookeeper/node
    Created /path/to/zookeeper/node

  • +
+

Creating the table

Before creating the table, the cluster has to be configured. My configuration:

+
<!-- zookeeper configuration -->
<zookeeper>
<node>
<host>localhost</host>
<port>2181</port>
</node>
<session_timeout_ms>30000</session_timeout_ms>
<operation_timeout_ms>10000</operation_timeout_ms>
<!-- Optional. Chroot suffix. Should exist. -->
<root>/path/to/zookeeper/node</root>

</zookeeper>
<!-- macros: the variables referenced in the table path are resolved from here -->
<macros>
<cluster>testcluster</cluster>
<shard>01</shard>
<replica>example01-01-1</replica>
</macros>
<remote_servers>
<!-- the cluster is named testcluster; the name is arbitrary -->
<testcluster>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</testcluster>
</remote_servers>
<distributed_ddl>

+

The CREATE TABLE statement:

+
### {cluster} here is the testcluster defined in the macros above
CREATE TABLE test ON CLUSTER `{cluster}`
(
`timestamp` DateTime,
`contractid` UInt32,
`userid` UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/default/test', '{replica}')
PARTITION BY toYYYYMM(timestamp)
ORDER BY (contractid, toDate(timestamp), userid)
SAMPLE BY userid

Query id: 56c07fac-9a0b-4b0b-bf8f-fb808ce452e6

+ +

Querying the zk configuration

SELECT  path  FROM system.zookeeper
+ +

Errors encountered

Error: There is no DistributedDDL configuration in server config

+

Cause: the ClickHouse config is missing this section; add the following configuration (see the link referenced above).

+
<distributed_ddl>
<!-- Path in ZooKeeper to queue with DDL queries -->
<path>/clickhouse/task_queue/ddl</path>
<cleanup_delay_period>60</cleanup_delay_period>
<task_max_lifetime>86400</task_max_lifetime>
<max_tasks_in_queue>1000</max_tasks_in_queue>
</distributed_ddl>
+ +
diff --git "a/2022/05/20/\344\275\277\347\224\250gtest/index.html" "b/2022/05/20/\344\275\277\347\224\250gtest/index.html"
new file mode 100644
index 0000000000..7ccd0c5f15
--- /dev/null
+++ "b/2022/05/20/\344\275\277\347\224\250gtest/index.html"
@@ -0,0 +1,444 @@

+ 使用gtest (using gtest) +

Background

Unit testing is a very basic capability, and knowing how to use unit tests is the foundation of a well-maintained program.
So maintaining a program through unit tests is a given.

+

gtest quick start
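A minimal GoogleTest example to get started (assuming gtest is installed and linked; the add function is made up for illustration):

#include <gtest/gtest.h>

// function under test (hypothetical)
static int add(int a, int b) { return a + b; }

TEST(AddTest, HandlesSmallNumbers) {
    EXPECT_EQ(add(1, 2), 3);
    EXPECT_NE(add(2, 2), 5);
}

// Not needed when linking against gtest_main; shown here for completeness.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}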

Related reading

diff --git "a/2022/05/22/clickhouse-\350\257\267\346\261\202\347\232\204\347\224\237\345\221\275\345\221\250\346\234\237/index.html" "b/2022/05/22/clickhouse-\350\257\267\346\261\202\347\232\204\347\224\237\345\221\275\345\221\250\346\234\237/index.html"
new file mode 100644
index 0000000000..ce194d7502
--- /dev/null
+++ "b/2022/05/22/clickhouse-\350\257\267\346\261\202\347\232\204\347\224\237\345\221\275\345\221\250\346\234\237/index.html"
@@ -0,0 +1,446 @@

+ clickhouse 请求的生命周期 (the life cycle of a request) +

Background

Our current workload uses curl to send requests to ClickHouse and write data into it, so I was curious about the full life cycle of a request in ClickHouse.

+

The request

Parsing the HTTP request

The request payload

echo -ne '1,Hello\n2,World\n' | curl -sSF 'file=@-' "http://localhost:8123/?query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String";
+ +

The request reaching ClickHouse

(lldb) p in
(DB::PeekableReadBuffer) $3 = {
DB::BufferWithOwnMemory<DB::ReadBuffer> = {
DB::ReadBuffer = {
DB::BufferBase = {
pos = 0x0000398b7c4ea0fe "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n"
bytes = 0
working_buffer = (begin_pos = "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n", end_pos = "")
internal_buffer = (begin_pos = "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n", end_pos = "")
padded = false
}
nextimpl_working_buffer_offset = 0
}
memory = (m_capacity = 0, m_size = 0, m_data = 0x0000000000000000, alignment = 0)
}
sub_buf = 0x00007fff2628c080
peeked_size = 0
checkpoint = Has Value=false {}
checkpoint_in_own_memory = false
stack_memory = "'\xf9\f\0\0\0\0(\0\0\0\0\0\0\0 "
use_stack_memory = true
}
(lldb) bt
* thread #4, name = 'HTTPHandler', stop reason = step over
* frame #0: 0x000000001c5a98dc clickhouse-server`DB::HTMLForm::MultipartReadBuffer::readLine(this=0x00007fff2c9eede8, append_crlf=true) at HTMLForm.cpp:271:9
frame #1: 0x000000001c5a95df clickhouse-server`DB::HTMLForm::MultipartReadBuffer::skipToNextBoundary(this=0x00007fff2c9eede8) at HTMLForm.cpp:253:21
frame #2: 0x000000001c5a8ad4 clickhouse-server`DB::HTMLForm::readMultipart(this=0x00007fff2c9f0e10, in_=0x00007fff2628c080, handler=0x00007fff2c9ef1f0) at HTMLForm.cpp:186:13
frame #3: 0x000000001c5a7e39 clickhouse-server`DB::HTMLForm::load(this=0x00007fff2c9f0e10, request=0x00007fff2c9f1438, requestBody=0x00007fff2628c080, handler=0x00007fff2c9ef1f0) at HTMLForm.cpp:99:13
frame #4: 0x000000001d3ba404 clickhouse-server`DB::DynamicQueryHandler::getQuery(this=0x00007fff262b4000, request=0x00007fff2c9f1438, params=0x00007fff2c9f0e10, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff2628f800 strong=2 weak=6) at HTTPHandler.cpp:1032:12
frame #5: 0x000000001d3b5ed4 clickhouse-server`DB::HTTPHandler::processQuery(this=0x00007fff262b4000, request=0x00007fff2c9f1438, params=0x00007fff2c9f0e10, response=0x00007fff2c9f14f0, used_output=0x00007fff2c9f0ec8, query_scope= Has Value=true ) at HTTPHandler.cpp:764:26
frame #6: 0x000000001d3b90de clickhouse-server`DB::HTTPHandler::handleRequest(this=0x00007fff262b4000, request=0x00007fff2c9f1438, response=0x00007fff2c9f14f0) at HTTPHandler.cpp:960:9
frame #7: 0x000000001d3f09f7 clickhouse-server`DB::HTTPServerConnection::run(this=0x00007fff2628c000) at HTTPServerConnection.cpp:65:34
frame #8: 0x000000002308f119 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007fff2628c000) at TCPServerConnection.cpp:43:3
frame #9: 0x000000002308f926 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff29fa8800) at TCPServerDispatcher.cpp:115:20
frame #10: 0x00000000232cfc34 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702e200) at ThreadPool.cpp:199:14
frame #11: 0x00000000232cc75a clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001350) at Thread.cpp:55:11
frame #12: 0x00000000232cb53e clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702e238) at Thread_POSIX.cpp:345:27
frame #13: 0x00007ffff7dfeb43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
frame #14: 0x00007ffff7e90a00 libc.so.6`__clone3 at clone3.S:81
+

Building the syntax tree:

+
std::tie(ast, streams) = executeQueryImpl(begin, end, context, false, QueryProcessingStage::Complete, &istr);
+ +
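Putting the two backtraces in this post together, the HTTP path looks roughly like this; the function names are the ones visible in the traces, while the flow itself is a simplified sketch rather than actual ClickHouse source:

// HTTPServerConnection::run
//   -> HTTPHandler::handleRequest
//        -> HTTPHandler::processQuery
//             -> DynamicQueryHandler::getQuery        // HTMLForm::load / readMultipart parse the
//                                                     // multipart body ("file=@-") shown above
//             -> query execution entry point, which internally does:
//                  std::tie(ast, streams) = executeQueryImpl(begin, end, context, ...);
//                  // parse SQL -> AST, build an interpreter, return the result streams
//             -> the result streams are copied into the HTTP response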
diff --git a/2022/06/01/clickhouse-function/index.html b/2022/06/01/clickhouse-function/index.html
new file mode 100644
index 0000000000..8c629452a1
--- /dev/null
+++ b/2022/06/01/clickhouse-function/index.html
@@ -0,0 +1,459 @@

+ clickhouse function +

Background

I want to become familiar with ClickHouse's internals.

+

Implementation

// ClickHouse/src/Functions/isNaN.cpp
// add a struct
struct testtrue
{
    static constexpr auto name = "testtrue"; // function name
    template <typename T>
    static bool execute(const T t) // execution callback
    {
        /// Suppression for PVS-Studio.
        return true || t;
    }
};
// alias
using FunctionTestTure = FunctionNumericPredicate<testtrue>;


void registerFunctionIsNaN(FunctionFactory & factory)
{
    factory.registerFunction<FunctionIsNaN>();
    factory.registerFunction<FunctionTestTure>(); // register the new function here
}
+ +

Recompile and call it:

+
SELECT testtrue(2)

Query id: 87e4625d-8b79-4c3a-8153-b333d6a0614f

┌─testtrue(2)─┐
│ 1 │
└─────────────┘

+ +

Registration path

(lldb) bt
* thread #1, name = 'clickhouse-serv', stop reason = breakpoint 1.1
* frame #0: 0x00000000148ee2ac clickhouse-server`DB::registerFunctionIsNaN(factory=0x000000002f984a50) at isNaN.cpp:43:5
frame #1: 0x0000000010339e2c clickhouse-server`DB::registerFunctionsMiscellaneous(factory=0x000000002f984a50) at registerFunctionsMiscellaneous.cpp:128:5
frame #2: 0x00000000103391a0 clickhouse-server`DB::registerFunctions() at registerFunctions.cpp:96:5
frame #3: 0x000000000d063d35 clickhouse-server`DB::Server::main(this=0x00007fffffffd9b8, (null)=size=0) at Server.cpp:623:5
frame #4: 0x00000000230abec5 clickhouse-server`Poco::Util::Application::run(this=0x00007fffffffd9b8) at Application.cpp:334:8
frame #5: 0x000000000d06250b clickhouse-server`DB::Server::run(this=0x00007fffffffd9b8) at Server.cpp:461:25
frame #6: 0x00000000230c6c70 clickhouse-server`Poco::Util::ServerApplication::run(this=0x00007fffffffd9b8, argc=1, argv=0x00007ffff70f7038) at ServerApplication.cpp:611:9
frame #7: 0x000000000d05f8e1 clickhouse-server`mainEntryClickHouseServer(argc=1, argv=0x00007ffff70f7038) at Server.cpp:187:20
frame #8: 0x000000000cf7fe63 clickhouse-server`main(argc_=1, argv_=0x00007fffffffdfb8) at main.cpp:409:12
frame #9: 0x00007ffff7d92d90 libc.so.6`__libc_start_call_main(main=(clickhouse-server`main at main.cpp:380), argc=1, argv=0x00007fffffffdfb8) at libc_start_call_main.h:58:16
frame #10: 0x00007ffff7d92e40 libc.so.6`__libc_start_main_impl(main=(clickhouse-server`main at main.cpp:380), argc=1, argv=0x00007fffffffdfb8, init=0x00007ffff7ffd040, fini=<unavailable>, rtld_fini=<unavailable>, stack_end=0x00007fffffffdfa8) at libc-start.c:392:3
frame #11: 0x000000000cf7fb55 clickhouse-server`_start + 37

+

Function call path

Debugging with lldb

+
(lldb) b DB::(anonymous namespace)::testtrue::execute

+ + +

Call stack:

+
(lldb) bt
* thread #3, name = 'TCPHandler', stop reason = breakpoint 3.1
* frame #0: 0x00000000148f3dca clickhouse-server`bool DB::(anonymous namespace)::testtrue::execute<char8_t>(t=0x02 u8'\U00000002') at isNaN.cpp:28:9
frame #1: 0x00000000148f29dd clickhouse-server`COW<DB::IColumn>::immutable_ptr<DB::IColumn> DB::FunctionNumericPredicate<DB::(anonymous namespace)::testtrue>::execute<char8_t>(this=0x00007fff1e44db58, in_untyped=0x00007fff1e447820) const at FunctionNumericPredicate.h:89:31
frame #2: 0x00000000148f1e8a clickhouse-server`DB::FunctionNumericPredicate<DB::(anonymous namespace)::testtrue>::executeImpl(this=0x00007fff1e44db58, arguments=size=1, (null)=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, (null)=1) const at FunctionNumericPredicate.h:61:22
frame #3: 0x00000000103a611c clickhouse-server`DB::IFunction::executeImplDryRun(this=0x00007fff1e44db58, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1) const at IFunction.h:395:16
frame #4: 0x00000000103a484d clickhouse-server`DB::FunctionToExecutableFunctionAdaptor::executeDryRunImpl(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1) const at IFunctionAdaptors.h:26:26
frame #5: 0x000000001a8fa3d9 clickhouse-server`DB::IExecutableFunction::executeWithoutLowCardinalityColumns(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:217:15
frame #6: 0x000000001a8fa00a clickhouse-server`DB::IExecutableFunction::defaultImplementationForConstantArguments(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:160:31
frame #7: 0x000000001a8fa2c4 clickhouse-server`DB::IExecutableFunction::executeWithoutLowCardinalityColumns(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:209:20
frame #8: 0x000000001a8faf55 clickhouse-server`DB::IExecutableFunction::executeWithoutSparseColumns(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:267:22
frame #9: 0x000000001a8fbe43 clickhouse-server`DB::IExecutableFunction::execute(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:337:16
frame #10: 0x000000001b02a7b8 clickhouse-server`DB::ActionsDAG::addFunction(this=0x00007fff1e4ac248, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, children=size=0, result_name="testtrue(2)") at ActionsDAG.cpp:199:37
frame #11: 0x000000001cdad1a1 clickhouse-server`DB::ScopeStack::addFunction(this=0x00007fff2d1e96f0, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, argument_names=size=1, result_name="") at ActionsVisitor.cpp:598:51
frame #12: 0x000000001cdb7485 clickhouse-server`DB::ActionsMatcher::Data::addFunction(this=0x00007fff2d1e9698, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, argument_names=size=1, result_name=<unavailable>) at ActionsVisitor.h:140:27
frame #13: 0x000000001cdb0a2b clickhouse-server`DB::ActionsMatcher::visit(node=0x00007fff2a45c9b8, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff2a45c9b8 strong=1 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:1093:14
frame #14: 0x000000001cdad64d clickhouse-server`DB::ActionsMatcher::visit(ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff2a45c9b8 strong=1 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:655:9
frame #15: 0x000000001cdb125c clickhouse-server`DB::ActionsMatcher::visit(expression_list=0x00007fff1e4895b8, (null)=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:763:17
frame #16: 0x000000001cdad6b9 clickhouse-server`DB::ActionsMatcher::visit(ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:659:9
frame #17: 0x000000001b1ebce5 clickhouse-server`DB::InDepthNodeVisitor<DB::ActionsMatcher, true, false, std::__1::shared_ptr<DB::IAST> const>::visit(this=0x00007fff2d1e9638, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2) at InDepthNodeVisitor.h:34:13
frame #18: 0x000000001b1dc0ea clickhouse-server`DB::ExpressionAnalyzer::getRootActions(this=0x00007fff1e47f780, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, no_makeset_for_subqueries=false, actions=nullptr, only_consts=false) at ExpressionAnalyzer.cpp:587:48
frame #19: 0x000000001b1e2f9e clickhouse-server`DB::SelectQueryExpressionAnalyzer::appendSelect(this=0x00007fff1e47f780, chain=0x00007fff2d1ea5a0, only_types=false) at ExpressionAnalyzer.cpp:1383:5
frame #20: 0x000000001b1e6da8 clickhouse-server`DB::ExpressionAnalysisResult::ExpressionAnalysisResult(this=0x00007fff2d1eaa88, query_analyzer=0x00007fff1e47f780, metadata_snapshot=std::__1::shared_ptr<const DB::StorageInMemoryMetadata>::element_type @ 0x00007ffff705a800 strong=4 weak=1, first_stage_=true, second_stage_=true, only_types=false, filter_info_=nullptr, source_header=0x00007fff1e4c9550) at ExpressionAnalyzer.cpp:1830:24
frame #21: 0x000000001b57ab9d clickhouse-server`DB::InterpreterSelectQuery::getSampleBlockImpl(this=0x00007fff1e4c9000) at InterpreterSelectQuery.cpp:692:23
frame #22: 0x000000001b5747f9 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff2d1ec148, try_move_to_prewhere=true)::$_1::operator()(bool) const at InterpreterSelectQuery.cpp:552:25
frame #23: 0x000000001b5709f6 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff1e4c9000, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, input_pipe_= Has Value=false , storage_=nullptr, options_=0x00007fff1e461770, required_result_column_names=size=0, metadata_snapshot_=nullptr, subquery_for_sets_=size=0, prepared_sets_=size=0) at InterpreterSelectQuery.cpp:555:5
frame #24: 0x000000001b56edf3 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff1e4c9000, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, options_=0x00007fff1e461770, required_result_column_names_=size=0) at InterpreterSelectQuery.cpp:165:7
frame #25: 0x000000001b5f4ce5 clickhouse-server`std::__1::__unique_if<DB::InterpreterSelectQuery>::__unique_single std::__1::make_unique<DB::InterpreterSelectQuery, std::__1::shared_ptr<DB::IAST> const&, std::__1::shared_ptr<DB::Context>&, DB::SelectQueryOptions&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&>(__args=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, __args=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, __args=0x00007fff1e461770, __args=size=0) at unique_ptr.h:725:32
frame #26: 0x000000001b5f2d09 clickhouse-server`DB::InterpreterSelectWithUnionQuery::buildCurrentChildInterpreter(this=0x00007fff1e461700, ast_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, current_required_result_column_names=size=0) at InterpreterSelectWithUnionQuery.cpp:223:16
frame #27: 0x000000001b5f23ed clickhouse-server`DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(this=0x00007fff1e461700, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, options_=0x00007fff2d1ef588, required_result_column_names=size=0) at InterpreterSelectWithUnionQuery.cpp:140:13
frame #28: 0x000000001b53b830 clickhouse-server`std::__1::__unique_if<DB::InterpreterSelectWithUnionQuery>::__unique_single std::__1::make_unique<DB::InterpreterSelectWithUnionQuery, std::__1::shared_ptr<DB::IAST>&, std::__1::shared_ptr<DB::Context>&, DB::SelectQueryOptions const&>(__args=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, __args=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, __args=0x00007fff2d1ef588) at unique_ptr.h:725:32
frame #29: 0x000000001b539e5d clickhouse-server`DB::InterpreterFactory::get(query=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, options=0x00007fff2d1ef588) at InterpreterFactory.cpp:122:16
frame #30: 0x000000001b961577 clickhouse-server`DB::executeQueryImpl(begin="select testtrue(2);", end="", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, internal=false, stage=Complete, istr=0x0000000000000000) at executeQuery.cpp:658:27
frame #31: 0x000000001b95ee64 clickhouse-server`DB::executeQuery(query="select testtrue(2);", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, internal=false, stage=Complete) at executeQuery.cpp:1067:30
frame #32: 0x000000001c590f25 clickhouse-server`DB::TCPHandler::runImpl(this=0x00007fff1e46e000) at TCPHandler.cpp:332:24
frame #33: 0x000000001c59f9e5 clickhouse-server`DB::TCPHandler::run(this=0x00007fff1e46e000) at TCPHandler.cpp:1781:9
frame #34: 0x0000000023091f79 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007fff1e46e000) at TCPServerConnection.cpp:43:3
frame #35: 0x0000000023092786 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff26648600) at TCPServerDispatcher.cpp:115:20
frame #36: 0x00000000232d2a94 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702df80) at ThreadPool.cpp:199:14
frame #37: 0x00000000232cf5ba clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001330) at Thread.cpp:55:11
frame #38: 0x00000000232ce39e clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702dfb8) at Thread_POSIX.cpp:345:27
frame #39: 0x00007ffff7dfdb43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
frame #40: 0x00007ffff7e8fa00 libc.so.6`__clone3 at clone3.S:81

Implementing a function

All functions inherit from IFunction.

## ClickHouse/src/Functions/IFunction.h

class IFunction
{
public:

    virtual ~IFunction() = default;

    virtual String getName() const = 0;

    virtual ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const = 0;
    virtual ColumnPtr executeImplDryRun(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
    {
        return executeImpl(arguments, result_type, input_rows_count);
    }
    ...
};

using FunctionPtr = std::shared_ptr<IFunction>;

The core is the virtual method executeImpl.

Let's look at a template class that implements it.

## ClickHouse/src/Functions/FunctionNumericPredicate.h
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{
    const auto * in = arguments.front().column.get();

    ColumnPtr res;
    if (!((res = execute<UInt8>(in))
        || (res = execute<UInt16>(in))
        || (res = execute<UInt32>(in))
        || (res = execute<UInt64>(in))
        || (res = execute<Int8>(in))
        || (res = execute<Int16>(in))
        || (res = execute<Int32>(in))
        || (res = execute<Int64>(in))
        || (res = execute<Float32>(in))
        || (res = execute<Float64>(in))))
        throw Exception{"Illegal column " + in->getName() + " of first argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN};

    return res;
}

template <typename T>
ColumnPtr execute(const IColumn * in_untyped) const
{
    if (const auto in = checkAndGetColumn<ColumnVector<T>>(in_untyped))
    {
        const auto size = in->size();

        auto out = ColumnUInt8::create(size);

        const auto & in_data = in->getData();
        auto & out_data = out->getData();

        for (const auto i : collections::range(0, size))
            out_data[i] = Impl::execute(in_data[i]);

        return out;
    }

    return nullptr;
}
diff --git "a/2022/06/19/lucene-\347\274\226\350\257\221\345\256\211\350\243\205/index.html" "b/2022/06/19/lucene-\347\274\226\350\257\221\345\256\211\350\243\205/index.html"
new file mode 100644
index 0000000000..3a7d5e8024
--- /dev/null
+++ "b/2022/06/19/lucene-\347\274\226\350\257\221\345\256\211\350\243\205/index.html"
@@ -0,0 +1,493 @@

Compiling and installing Lucene

Background

I needed to compile Lucene and get familiar with its code.

Building

Because Lucene pins the JDK version, I had to switch to JDK 17; my local machine was on JDK 18.

Clone the code

## clone the code
git clone https://github.com/apache/lucene.git

### change into the directory
cd lucene

### build
./gradlew

## if you need to go through a proxy, the build is faster this way
## specify the proxy host and port
./gradlew -DsocksProxyHost=192.168.1.102 -DsocksProxyPort=1081

Running and testing

### build the demo jar
./gradlew lucene:demo:jar

### run the demo
java -cp /home/ubuntu/lucene-9.1.0/lucene/demo/build/classes/java/main:/home/ubuntu/lucene-9.1.0/lucene/core/build/classes/java/main/ org.apache.lucene.demo.IndexFiles -

On Ubuntu, switching to JDK 17 looks like this:

### install jdk17
sudo apt install openjdk-17-jdk
# Configure Java (switch the java alternative)
sudo update-alternatives --config java

# Configure Java Compiler (switch the javac alternative)
sudo update-alternatives --config javac


### check the result: java is now 17
java --version
openjdk 17.0.3 2022-04-19
OpenJDK Runtime Environment (build 17.0.3+7-Ubuntu-0ubuntu0.22.04.1)
OpenJDK 64-Bit Server VM (build 17.0.3+7-Ubuntu-0ubuntu0.22.04.1, mixed mode, sharing)

Errors encountered

gradle-wrapper.jar would not download; skip certificate verification:

wget --no-check-certificate  https://raw.githubusercontent.com/gradle/gradle/v7.3.3/gradle/wrapper/gradle-wrapper.jar

Then put it under {$luceneGitDir}/gradle/wrapper/, where luceneGitDir is the directory you cloned Lucene into.

Related code

IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
iwc.setUseCompoundFile(false); // write separate index files instead of a compound file
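
For context, a minimal indexing setup around that config could look like the sketch below. This is my own toy example, not code from the Lucene repo: the index path, field name and class name are made up, and it needs lucene-core plus lucene-analysis-common on the classpath.

## sketch: TinyIndexer.java (illustrative, not part of Lucene)
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class TinyIndexer {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
        iwc.setUseCompoundFile(false); // keep .fdt/.fdx/... as separate files so they are easy to hexdump

        try (Directory dir = FSDirectory.open(Paths.get("index"));
             IndexWriter writer = new IndexWriter(dir, iwc)) {
            Document doc = new Document();
            doc.add(new TextField("contents", "hello lucene", Field.Store.YES));
            writer.addDocument(doc);
            writer.commit(); // flush a segment so the index files appear on disk
        }
    }
}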

Writing the header

The corresponding jdb session:

main[1] stop in  org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136
Deferring breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136.
It will be set after the class is loaded.
main[1] cont
> Set deferred breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136

Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.<init>(), line=136 bci=180
136 CodecUtil.writeIndexHeader(

main[1] list
132
133 fieldsStream =
134 directory.createOutput(
135 IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
136 => CodecUtil.writeIndexHeader(
137 fieldsStream, formatName, VERSION_CURRENT, si.getId(), segmentSuffix);
138 assert CodecUtil.indexHeaderLength(formatName, segmentSuffix)
139 == fieldsStream.getFilePointer();
140
141 indexWriter =
main[1] print formatName
formatName = "Lucene90StoredFieldsFastData"

The corresponding stack trace:

[1] org.apache.lucene.store.OutputStreamIndexOutput.writeByte (OutputStreamIndexOutput.java:54)
[2] org.apache.lucene.codecs.CodecUtil.writeBEInt (CodecUtil.java:653)
[3] org.apache.lucene.codecs.CodecUtil.writeHeader (CodecUtil.java:82)
[4] org.apache.lucene.codecs.CodecUtil.writeIndexHeader (CodecUtil.java:125)
[5] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.<init> (Lucene90CompressingStoredFieldsWriter.java:128)
[6] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat.fieldsWriter (Lucene90CompressingStoredFieldsFormat.java:140)
[7] org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat.fieldsWriter (Lucene90StoredFieldsFormat.java:154)
[8] org.apache.lucene.index.StoredFieldsConsumer.initStoredFieldsWriter (StoredFieldsConsumer.java:49)
[9] org.apache.lucene.index.StoredFieldsConsumer.startDocument (StoredFieldsConsumer.java:56)
[10] org.apache.lucene.index.IndexingChain.startStoredFields (IndexingChain.java:556)
[11] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:587)
[12] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[13] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[14] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[15] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[16] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
[17] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
[18] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[19] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[20] java.nio.file.Files.walkFileTree (Files.java:2,725)
[21] java.nio.file.Files.walkFileTree (Files.java:2,797)
[22] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[23] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

Inverted index

main[1] where
[1] org.apache.lucene.index.TermsHashPerField.initStreamSlices (TermsHashPerField.java:150)
[2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:198)
[3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
[4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
[5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
[11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[13] java.nio.file.Files.walkFileTree (Files.java:2,725)
[14] java.nio.file.Files.walkFileTree (Files.java:2,797)
[15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

Writing the field contents

main[1] where
[1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField (Lucene90CompressingStoredFieldsWriter.java:276)
[2] org.apache.lucene.index.StoredFieldsConsumer.writeField (StoredFieldsConsumer.java:65)
[3] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:749)
[4] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[5] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[6] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[7] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[8] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[9] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
[10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
[11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[13] java.nio.file.Files.walkFileTree (Files.java:2,725)
[14] java.nio.file.Files.walkFileTree (Files.java:2,797)
[15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

Inspecting the .fdt file

hexdump -C _0.fdt
00000000 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f |?.l..Lucene90Sto|
00000010 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74 |redFieldsFastDat|
00000020 61 00 00 00 01 85 88 12 2b 0c 73 6b 95 30 38 76 |a.......+.sk.08v|
00000030 c9 0a 2a 52 29 00 00 0a 00 01 00 1c 02 06 03 07 |..*R)...........|
00000040 07 07 07 07 07 07 07 07 20 00 1a 60 2f 68 6f 6d |........ ..`/hom|
00000050 65 2f 60 75 62 75 6e 74 75 60 2f 64 6f 63 2f 6d |e/`ubuntu`/doc/m|
00000060 60 6f 6e 67 6f 2e 74 60 78 74 00 1a 2f 68 60 6f |`ongo.t`xt../h`o|
00000070 6d 65 2f 75 62 60 75 6e 74 75 2f 64 60 6f 63 2f |me/ub`untu/d`oc/|
00000080 68 65 6c 60 6c 6f 2e 74 78 74 c0 28 93 e8 00 00 |hel`lo.txt.(....|
00000090 00 00 00 00 00 00 c8 75 0a 41 |.......u.A|
0000009a

The .fdt layout

Now let's analyze the .fdt format; [1-4] means bytes one through four.

[1-4] The first four bytes are the big-endian magic number CODEC_MAGIC = 0x3fd76c17
[5-33] The fifth byte gives the codec name length: hex 1c, i.e. decimal 28; bytes [6-33] are the 28-byte string Lucene90StoredFieldsFastData
[34-37] After the string comes the hard-coded version, a big-endian 1
[38-53] 16 bytes holding the unique id that identifies this file

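As a sanity check of that layout, here is a small stand-alone sketch (my own code, not Lucene's) that reads just the index header of an .fdt file following the byte ranges above. The file name is illustrative, and note the codec name length is actually a VInt, which happens to fit in one byte for a 28-character name.

## sketch: FdtHeaderDump.java (illustrative, not part of Lucene)
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FdtHeaderDump {
    public static void main(String[] args) throws IOException {
        try (DataInputStream in = new DataInputStream(new FileInputStream("_0.fdt"))) {
            int magic = in.readInt();            // bytes [1-4], big-endian magic number
            int nameLen = in.readUnsignedByte(); // byte [5], codec name length (a VInt, one byte here)
            byte[] name = new byte[nameLen];     // bytes [6-33] for a 28-character name
            in.readFully(name);
            int version = in.readInt();          // bytes [34-37], big-endian version
            byte[] id = new byte[16];            // bytes [38-53], unique file id
            in.readFully(id);

            System.out.printf("magic=0x%08x (expect 0x3fd76c17)%n", magic);
            System.out.println("codec=" + new String(name, StandardCharsets.UTF_8));
            System.out.println("version=" + version);
        }
    }
}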

Buffer pools

TermsHashPerField holds three buffer pools: intPool, bytePool, and termBytePool.

TermsHashPerField(
    int streamCount,
    IntBlockPool intPool,
    ByteBlockPool bytePool,
    ByteBlockPool termBytePool,
    Counter bytesUsed,
    TermsHashPerField nextPerField,
    String fieldName,
    IndexOptions indexOptions) {
  this.intPool = intPool;
  this.bytePool = bytePool;
  this.streamCount = streamCount;
  this.fieldName = fieldName;
  this.nextPerField = nextPerField;
  assert indexOptions != IndexOptions.NONE;
  this.indexOptions = indexOptions;
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}

Generating a term

main[1] where
[1] org.apache.lucene.util.BytesRefHash.add (BytesRefHash.java:247)
[2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:193)
[3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
[4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
[5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
[11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[13] java.nio.file.Files.walkFileTree (Files.java:2,725)
[14] java.nio.file.Files.walkFileTree (Files.java:2,797)
[15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

Searching

main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:476)
[2] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
[3] org.apache.lucene.index.TermStates.build (TermStates.java:102)
[4] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
[5] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Further reading

diff --git "a/2022/06/26/c-\345\237\272\347\241\200/index.html" "b/2022/06/26/c-\345\237\272\347\241\200/index.html"
new file mode 100644
index 0000000000..58fea9a679
--- /dev/null
+++ "b/2022/06/26/c-\345\237\272\347\241\200/index.html"
@@ -0,0 +1,456 @@

C++ basics

Storage duration

This describes an object's lifetime; there are four kinds:

  • automatic: the lifetime is the enclosing block; storage is allocated when the block is entered and released when it is left
  • static: the lifetime is the whole program; storage is allocated before the program starts and released after it ends (this has little to do with the static keyword itself)
  • thread: the lifetime runs from thread start to thread end
  • dynamic: dynamic lifetime, typically objects obtained via new or malloc

Linkage

Linkage describes the visibility of a name; there are three kinds:

  • no linkage: visible only within the current block
  • internal linkage: visible only within the current translation unit
  • external linkage: visible to other translation units

Further reading

diff --git "a/2022/06/27/lucene-10\346\272\220\347\240\201\345\210\206\346\236\220/index.html" "b/2022/06/27/lucene-10\346\272\220\347\240\201\345\210\206\346\236\220/index.html"
new file mode 100644
index 0000000000..432c495e54
--- /dev/null
+++ "b/2022/06/27/lucene-10\346\272\220\347\240\201\345\210\206\346\236\220/index.html"
@@ -0,0 +1,536 @@

Lucene 10 source code analysis

Background

The Lucene checkout on my home machine is version 10.

Creating and saving the index

### run with the JDWP agent so breakpoints can be set
java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp /home/dai/lucene/lucene/demo/build/libs/lucene-demo-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/core/build/libs/lucene-core-10.0.0-SNAPSHOT.jar org.apache.lucene.demo.IndexFiles -docs /home/dai/docs
### attach with jdb
jdb -attach 8000 -sourcepath /home/dai/lucene/lucene/demo/src/java/:/home/dai/lucene/lucene/core/src/java/

Tokenization

Both tokenization and the inverted index are built along this code path.

main[1] where
[1] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,140)
[2] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
[3] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[4] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[5] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[6] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[7] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[8] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
[9] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
[10] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[12] java.nio.file.Files.walkFileTree (Files.java:2,811)
[13] java.nio.file.Files.walkFileTree (Files.java:2,882)
[14] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[15] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

Step completed: "thread=main", org.apache.lucene.index.TermsHashPerField.add(), line=193 bci=22
193 int termID = bytesHash.add(termBytes);

main[1] print termBytes
termBytes = "[2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 2f 62 62 62 2e 74 78 74]"

invert

The inverted index boils down to building a term => doc mapping. The central class is lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java; its addTerm method is shown below.

@Override
void addTerm(final int termID, final int docID) {
final FreqProxPostingsArray postings = freqProxPostingsArray;
assert !hasFreq || postings.termFreqs[termID] > 0;

if (!hasFreq) {
assert postings.termFreqs == null;
if (termFreqAtt.getTermFrequency() != 1) {
throw new IllegalStateException(
"field \""
+ getFieldName()
+ "\": must index term freq while using custom TermFrequencyAttribute");
}
if (docID != postings.lastDocIDs[termID]) {
// New document; now encode docCode for previous doc:
assert docID > postings.lastDocIDs[termID];
writeVInt(0, postings.lastDocCodes[termID]);
postings.lastDocCodes[termID] = docID - postings.lastDocIDs[termID];
postings.lastDocIDs[termID] = docID;
fieldState.uniqueTermCount++;
}
} else if (docID != postings.lastDocIDs[termID]) {
assert docID > postings.lastDocIDs[termID]
: "id: " + docID + " postings ID: " + postings.lastDocIDs[termID] + " termID: " + termID;
// Term not yet seen in the current doc but previously
// seen in other doc(s) since the last flush

// Now that we know doc freq for previous doc,
// write it & lastDocCode
if (1 == postings.termFreqs[termID]) {
writeVInt(0, postings.lastDocCodes[termID] | 1);
} else {
writeVInt(0, postings.lastDocCodes[termID]);
writeVInt(0, postings.termFreqs[termID]);
}

// Init freq for the current document
postings.termFreqs[termID] = getTermFreq();
fieldState.maxTermFrequency =
Math.max(postings.termFreqs[termID], fieldState.maxTermFrequency);
postings.lastDocCodes[termID] = (docID - postings.lastDocIDs[termID]) << 1;
postings.lastDocIDs[termID] = docID;
if (hasProx) {
writeProx(termID, fieldState.position);
if (hasOffsets) {
postings.lastOffsets[termID] = 0;
writeOffsets(termID, fieldState.offset);
}
} else {
assert !hasOffsets;
}
fieldState.uniqueTermCount++;
} else {
postings.termFreqs[termID] = Math.addExact(postings.termFreqs[termID], getTermFreq());
fieldState.maxTermFrequency =
Math.max(fieldState.maxTermFrequency, postings.termFreqs[termID]);
if (hasProx) {
writeProx(termID, fieldState.position - postings.lastPositions[termID]);
if (hasOffsets) {
writeOffsets(termID, fieldState.offset);
}
}
}
}
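
To keep the term => doc idea in mind while reading the pooled structures below, here is a deliberately naive sketch of an inverted index; it is a plain map of postings lists and has nothing to do with Lucene's real int/byte pools.

## sketch: ToyInvertedIndex.java (illustrative, not Lucene's data structures)
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ToyInvertedIndex {
    private final Map<String, List<Integer>> postings = new HashMap<>();

    /** Record that term occurs in docId; docIds are assumed to arrive in increasing order. */
    public void addTerm(String term, int docId) {
        List<Integer> docs = postings.computeIfAbsent(term, t -> new ArrayList<>());
        if (docs.isEmpty() || docs.get(docs.size() - 1) != docId) {
            docs.add(docId); // postings stay sorted because docs are added in docId order
        }
    }

    public List<Integer> docsFor(String term) {
        return postings.getOrDefault(term, List.of());
    }

    public static void main(String[] args) {
        ToyInvertedIndex idx = new ToyInvertedIndex();
        idx.addTerm("hello", 0);
        idx.addTerm("world", 0);
        idx.addTerm("hello", 1);
        System.out.println(idx.docsFor("hello")); // [0, 1]
    }
}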

Generating the termId

Stack trace:

main[1] where
[1] org.apache.lucene.index.TermsHashPerField.initStreamSlices (TermsHashPerField.java:150)
[2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:198)
[3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
[4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
[5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
[6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
[7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
[8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
[9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
[10] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
[11] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
[12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
[13] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
[14] java.nio.file.Files.walkFileTree (Files.java:2,811)
[15] java.nio.file.Files.walkFileTree (Files.java:2,882)
[16] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
[17] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

IntBlockPool intPool,
ByteBlockPool bytePool,
ByteBlockPool termBytePool,

Let's look at intPool first. It maintains a two-dimensional array int buffers[][] plus three offsets that track positions in bytePool.

public final class IntBlockPool {
  ...

  // The core structure. The initial capacity is 10 inner buffers and it grows automatically.
  // The ints stored here are offsets into bytePool.
  public int[][] buffers = new int[10][];

  // Index into the outer array, used together with buffers, e.g. buffers[bufferUpto + offset]
  private int bufferUpto = -1;
  // The inner array currently being written to, e.g. buffer = buffers[1]
  public int[] buffer;
  // intUpto is the offset relative to the current inner array
  public int intUpto = INT_BLOCK_SIZE;
  // Absolute offset relative to the whole 2D pool, a bit like relative vs. absolute jumps
  public int intOffset = -INT_BLOCK_SIZE;
}

Just like intPool, bytePool and termBytePool are described by a two-dimensional array plus a few offset variables.

public final class ByteBlockPool implements Accountable {
  ...
  // The core structure: a two-dimensional byte array
  public byte[][] buffers = new byte[10][];

  /** index into the buffers array pointing to the current buffer used as the head */
  private int bufferUpto = -1; // Which buffer we are upto
  /** Where we are in head buffer */
  public int byteUpto = BYTE_BLOCK_SIZE;

  /** Current head buffer */
  public byte[] buffer;
  /** Current head offset */
  public int byteOffset = -BYTE_BLOCK_SIZE;
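
As a mental model for these pools, here is a simplified sketch of the "two-dimensional array plus offsets" idea: bytes are handed out from the head buffer, and a new inner buffer is allocated when the head fills up. The real ByteBlockPool has much more machinery (slice levels, reuse, memory accounting); this only illustrates how bufferUpto, byteUpto and byteOffset relate.

## sketch: SimpleBytePool.java (simplified model, not Lucene's ByteBlockPool)
public class SimpleBytePool {
    public static final int BLOCK_SIZE = 1 << 15; // size of each inner buffer

    private byte[][] buffers = new byte[10][]; // the 2D structure
    private int bufferUpto = -1;               // index of the head buffer
    private byte[] buffer;                     // the head buffer itself
    private int byteUpto = BLOCK_SIZE;         // write position inside the head buffer
    private int byteOffset = -BLOCK_SIZE;      // absolute offset of the head buffer in the pool

    /** Reserve len bytes (len must be <= BLOCK_SIZE) and return the absolute offset of the slice. */
    public int alloc(int len) {
        if (byteUpto + len > BLOCK_SIZE) {     // head buffer is full: move to a new one
            if (++bufferUpto == buffers.length) {
                byte[][] grown = new byte[buffers.length * 2][];
                System.arraycopy(buffers, 0, grown, 0, buffers.length);
                buffers = grown;               // grow the outer array when needed
            }
            buffer = buffers[bufferUpto] = new byte[BLOCK_SIZE];
            byteUpto = 0;
            byteOffset += BLOCK_SIZE;
        }
        int absolute = byteOffset + byteUpto;  // absolute offset = buffer base + position in buffer
        byteUpto += len;
        return absolute;
    }

    /** Translate an absolute offset back into (buffer, position). */
    public byte read(int absolute) {
        return buffers[absolute / BLOCK_SIZE][absolute % BLOCK_SIZE];
    }
}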

Querying and searching

Breakpoints

## run with the JDWP agent
java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp /home/dai/lucene/lucene/demo/build/libs/lucene-demo-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/core/build/libs/lucene-core-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/queryparser/build/libs/lucene-queryparser-10.0.0-SNAPSHOT.jar org.apache.lucene.demo.SearchFiles

## attach with jdb
jdb -attach 8000 -sourcepath /home/dai/lucene/lucene/demo/src/java/:/home/dai/lucene/lucene/core/src/java/

termState holds the per-term statistics.

main[1] print termState
termState = "TermStates
state=docFreq=1 totalTermFreq=1 termBlockOrd=2 blockFP=0 docStartFP=63 posStartFP=63 payStartFP=0 lastPosBlockOffset=-1 singletonDocID=6
"
main[1] print term
term = "contents:am"
main[1] where
[1] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:233)
[2] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:894)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[4] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[6] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[7] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Ranking

The default similarity used for scoring is BM25Similarity.

main[1] where
[1] org.apache.lucene.search.similarities.BM25Similarity.scorer (BM25Similarity.java:200)
[2] org.apache.lucene.search.TermQuery$TermWeight.<init> (TermQuery.java:75)
[3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:233)
[4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:894)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
[6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Core search parameters

main[1] list
763 // there is no doc of interest in this reader context
764 // continue with the following leaf
765 continue;
766 }
767 => BulkScorer scorer = weight.bulkScorer(ctx);
768 if (scorer != null) {
769 try {
770 scorer.score(leafCollector, ctx.reader().getLiveDocs());
771 } catch (
772 @SuppressWarnings("unused")
main[1] where
[1] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[4] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[6] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[7] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Getting the reader

Step completed: "thread=main", org.apache.lucene.index.LeafReaderContext.reader(), line=67 bci=0
67 return reader;

main[1] print reader
reader = "_0(10.0.0):c7:[diagnostics={source=flush, os.arch=amd64, java.runtime.version=17.0.3+7-Ubuntu-0ubuntu0.22.04.1, os.version=5.15.0-33-generic, java.vendor=Private Build, os=Linux, timestamp=1656601918836, java.version=17.0.3, java.vm.version=17.0.3+7-Ubuntu-0ubuntu0.22.04.1, lucene.version=10.0.0}]:[attributes={Lucene90StoredFieldsFormat.mode=BEST_SPEED}] :id=c276i3vlaza4c6uumuxapfnvf"
main[1] where
[1] org.apache.lucene.index.LeafReaderContext.reader (LeafReaderContext.java:67)
[2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[5] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


The reader object itself:

main[1] dump reader
reader = {
si: instance of org.apache.lucene.index.SegmentCommitInfo(id=1531)
originalSi: instance of org.apache.lucene.index.SegmentCommitInfo(id=1532)
metaData: instance of org.apache.lucene.index.LeafMetaData(id=1533)
liveDocs: null
hardLiveDocs: null
numDocs: 7
core: instance of org.apache.lucene.index.SegmentCoreReaders(id=1534)
segDocValues: instance of org.apache.lucene.index.SegmentDocValues(id=1535)
isNRT: false
docValuesProducer: null
fieldInfos: instance of org.apache.lucene.index.FieldInfos(id=1536)
readerClosedListeners: instance of java.util.concurrent.CopyOnWriteArraySet(id=1537)
readerCacheHelper: instance of org.apache.lucene.index.SegmentReader$1(id=1538)
coreCacheHelper: instance of org.apache.lucene.index.SegmentReader$2(id=1539)
$assertionsDisabled: true
org.apache.lucene.index.LeafReader.readerContext: instance of org.apache.lucene.index.LeafReaderContext(id=1540)
org.apache.lucene.index.LeafReader.$assertionsDisabled: true
org.apache.lucene.index.IndexReader.closed: false
org.apache.lucene.index.IndexReader.closedByChild: false
org.apache.lucene.index.IndexReader.refCount: instance of java.util.concurrent.atomic.AtomicInteger(id=1541)
org.apache.lucene.index.IndexReader.parentReaders: instance of java.util.Collections$SynchronizedSet(id=1542)
}

Scoring:

main[1] list
222
223 @Override
224 public int score(LeafCollector collector, Bits acceptDocs, int min, int max)
225 throws IOException {
226 => collector.setScorer(scorer);
227 DocIdSetIterator scorerIterator = twoPhase == null ? iterator : twoPhase.approximation();
228 DocIdSetIterator competitiveIterator = collector.competitiveIterator();
229 DocIdSetIterator filteredIterator;
230 if (competitiveIterator == null) {
231 filteredIterator = scorerIterator;
main[1] where
[1] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:226)
[2] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Collecting and ranking hits

private static class SimpleTopScoreDocCollector extends TopScoreDocCollector {

  ...

  @Override
  public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
    ...
    return new ScorerLeafCollector() {
      ...
      @Override
      public void collect(int doc) throws IOException {
        float score = scorer.score(); // <-- no docId needs to be passed to get the score; the docId is available from the enclosing TopScoreDocCollector

        // This collector relies on the fact that scorers produce positive values:
        assert score >= 0; // NOTE: false for NaN

        totalHits++;
        hitsThresholdChecker.incrementHitCount();

        if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
          updateGlobalMinCompetitiveScore(scorer);
        }

        if (score <= pqTop.score) {
          if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
            // we just reached totalHitsThreshold, we can start setting the min
            // competitive score now
            updateMinCompetitiveScore(scorer);
          }
          // Since docs are returned in-order (i.e., increasing doc Id), a document
          // with equal score to pqTop.score cannot compete since HitQueue favors
          // documents with lower doc Ids. Therefore reject those docs too.
          return;
        }
        pqTop.doc = doc + docBase;
        pqTop.score = score;
        pqTop = pq.updateTop();
        updateMinCompetitiveScore(scorer);
      }
    };
  }
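
The pattern above, keeping a size-k min-heap keyed by score and only replacing the weakest entry when a new hit beats it, can be sketched with a plain java.util.PriorityQueue. This is just the general top-k idea, not Lucene's HitQueue or its sentinel/updateTop tricks.

## sketch: TopKCollector.java (general top-k pattern, not Lucene's HitQueue)
import java.util.Comparator;
import java.util.PriorityQueue;

public class TopKCollector {
    record Hit(int doc, float score) {}

    private final int k;
    // Min-heap: the weakest of the current top-k sits at the head, like pqTop above.
    private final PriorityQueue<Hit> pq = new PriorityQueue<>(Comparator.comparingDouble(Hit::score));

    TopKCollector(int k) { this.k = k; }

    void collect(int doc, float score) {
        if (pq.size() < k) {
            pq.add(new Hit(doc, score));
        } else if (score > pq.peek().score()) {
            pq.poll();                    // drop the current weakest hit
            pq.add(new Hit(doc, score));
        }                                 // else: the hit cannot compete, reject it
    }

    public static void main(String[] args) {
        TopKCollector c = new TopKCollector(2);
        c.collect(0, 1.2f);
        c.collect(1, 0.4f);
        c.collect(2, 2.5f); // evicts doc 1
        c.pq.forEach(h -> System.out.println(h.doc() + " " + h.score()));
    }
}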
main[1] print scorer
scorer = "scorer(weight(contents:am))[org.apache.lucene.search.TermScorer@290dbf45]"
main[1] dump scorer
scorer = {
postingsEnum: instance of org.apache.lucene.index.SlowImpactsEnum(id=1546)
impactsEnum: instance of org.apache.lucene.index.SlowImpactsEnum(id=1546)
iterator: instance of org.apache.lucene.search.ImpactsDISI(id=1547)
docScorer: instance of org.apache.lucene.search.LeafSimScorer(id=1548)
impactsDisi: instance of org.apache.lucene.search.ImpactsDISI(id=1547)
$assertionsDisabled: true
org.apache.lucene.search.Scorer.weight: instance of org.apache.lucene.search.TermQuery$TermWeight(id=1549)
}
main[1] where
[1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:76) <--- no doc_id is passed into the scorer here because the callback already knows it; there is also a pq here, a queue of docs ordered by score
[2] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
[3] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[4] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1]

The core scoring function

Computing the score

main[1] list
246 // float. And then monotonicity is preserved through composition via
247 // x -> 1 + x and x -> 1 - 1/x.
248 // Finally we expand weight * (1 - 1 / (1 + freq * 1/norm)) to
249 // weight - weight / (1 + freq * 1/norm), which runs slightly faster.
250 => float normInverse = cache[((byte) encodedNorm) & 0xFF];
251 return weight - weight / (1f + freq * normInverse);
252 }
253
254 @Override
255 public Explanation explain(Explanation freq, long encodedNorm) {
main[1] where
[1] org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer.score (BM25Similarity.java:250)
[2] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
[3] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
[4] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
[5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
[6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

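The expression on line 251 above is just an algebraic rewrite of weight * (1 - 1 / (1 + freq * 1/norm)), as the comment in the listing says. Here is a tiny sketch of that arithmetic; the weight and normInverse values are made-up inputs, whereas in Lucene they come from the idf-based weight and the cached decoded norms.

## sketch: Bm25ScoreSketch.java (the arithmetic only, inputs are invented)
public class Bm25ScoreSketch {
    // score = weight * (1 - 1 / (1 + freq * (1/norm)))
    //       = weight - weight / (1 + freq * (1/norm))   <-- the form used in BM25Similarity above
    static float score(float weight, float freq, float normInverse) {
        return weight - weight / (1f + freq * normInverse);
    }

    public static void main(String[] args) {
        float weight = 2.2f;      // made-up stand-in for the idf-based weight
        float normInverse = 0.5f; // made-up stand-in for 1/norm of the document
        for (float freq = 1; freq <= 4; freq++) {
            // the score grows with freq but saturates, which is the point of BM25
            System.out.printf("freq=%.0f score=%.4f%n", freq, score(weight, freq, normInverse));
        }
    }
}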

The reduce step

main[1] list
60 * Populates the results array with the ScoreDoc instances. This can be overridden in case a
61 * different ScoreDoc type should be returned.
62 */
63 protected void populateResults(ScoreDoc[] results, int howMany) {
64 => for (int i = howMany - 1; i >= 0; i--) {
65 results[i] = pq.pop();
66 }
67 }
68
69 /**
main[1] where
[1] org.apache.lucene.search.TopDocsCollector.populateResults (TopDocsCollector.java:64)
[2] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:166)
[3] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:98)
[4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:526)
[5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

A helper that extracts the top-k results.

Stack trace:

main[1] where
[1] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:312)
[2] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
[3] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
[4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

/**
* Auxiliary method used by the {@link #merge} impls. A sort value of null is used to indicate
* that docs should be sorted by score.
*/
private static TopDocs mergeAux(
Sort sort, int start, int size, TopDocs[] shardHits, Comparator<ScoreDoc> tieBreaker) {

final PriorityQueue<ShardRef> queue;
if (sort == null) {
queue = new ScoreMergeSortQueue(shardHits, tieBreaker);
} else {
queue = new MergeSortQueue(sort, shardHits, tieBreaker);
}

long totalHitCount = 0;
TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO;
int availHitCount = 0;
for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {
final TopDocs shard = shardHits[shardIDX];
// totalHits can be non-zero even if no hits were
// collected, when searchAfter was used:
totalHitCount += shard.totalHits.value;
// If any hit count is a lower bound then the merged
// total hit count is a lower bound as well
if (shard.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
}
if (shard.scoreDocs != null && shard.scoreDocs.length > 0) {
availHitCount += shard.scoreDocs.length;
queue.add(new ShardRef(shardIDX));
}
}

final ScoreDoc[] hits;
boolean unsetShardIndex = false;
if (availHitCount <= start) {
hits = new ScoreDoc[0];
} else {
hits = new ScoreDoc[Math.min(size, availHitCount - start)];
int requestedResultWindow = start + size;
int numIterOnHits = Math.min(availHitCount, requestedResultWindow);
int hitUpto = 0;
while (hitUpto < numIterOnHits) {
assert queue.size() > 0;
ShardRef ref = queue.top();
final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex++];

// Irrespective of whether we use shard indices for tie breaking or not, we check for
// consistent
// order in shard indices to defend against potential bugs
if (hitUpto > 0) {
if (unsetShardIndex != (hit.shardIndex == -1)) {
throw new IllegalArgumentException("Inconsistent order of shard indices");
}
}

unsetShardIndex |= hit.shardIndex == -1;

if (hitUpto >= start) {
hits[hitUpto - start] = hit;
}

hitUpto++;

if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) {
// Not done with this these TopDocs yet:
queue.updateTop();
} else {
queue.pop();
}
}
}

TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation);
if (sort == null) {
return new TopDocs(totalHits, hits);
} else {
return new TopFieldDocs(totalHits, hits, sort.getSort());
}
}

Fetching the document for a given docid

fieldsStream.seek(startPointer);
decompressor.decompress(fieldsStream, totalLength, offset, length, bytes);
assert bytes.length == length;
documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length);

Stack trace:

main[1] where
[1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:576)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:594)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
[4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
[5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Loading the file into memory with mmap:

Breakpoint hit: "thread=main", org.apache.lucene.store.ByteBufferIndexInput.setCurBuf(), line=86 bci=0
86 this.curBuf = curBuf;

main[1] where
[1] org.apache.lucene.store.ByteBufferIndexInput.setCurBuf (ByteBufferIndexInput.java:86)
[2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.<init> (ByteBufferIndexInput.java:556)
[3] org.apache.lucene.store.ByteBufferIndexInput.newInstance (ByteBufferIndexInput.java:63)
[4] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:238)
[5] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
[6] org.apache.lucene.index.SegmentInfos.readCommit (SegmentInfos.java:290)
[7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:88)
[8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
[9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:798)
[10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
[11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
[12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
[13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

Clearly, files are opened in org.apache.lucene.store.MMapDirectory.openInput; this method is what opens the index files.

The file segments_1 is opened first.

main[1] print name
name = "segments_1"
main[1] list
228
229 /** Creates an IndexInput for the file with the given name. */
230 @Override
231 public IndexInput openInput(String name, IOContext context) throws IOException {
232 => ensureOpen();
233 ensureCanRead(name);
234 Path path = directory.resolve(name);
235 try (FileChannel c = FileChannel.open(path, StandardOpenOption.READ)) {
236 final String resourceDescription = "MMapIndexInput(path=\"" + path.toString() + "\")";
237 final boolean useUnmap = getUseUnmap();
main[1]
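The idea behind openInput here, map the file and then read from the mapping, can be sketched with plain NIO. This is not Lucene's ByteBufferIndexInput, just the underlying mechanism; the file path is illustrative.

## sketch: MmapReadSketch.java (plain NIO, not Lucene's ByteBufferIndexInput)
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class MmapReadSketch {
    public static void main(String[] args) throws IOException {
        try (FileChannel ch = FileChannel.open(Paths.get("index/segments_1"), StandardOpenOption.READ)) {
            // Map the whole file read-only; subsequent reads go through the page cache.
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            buf.position(0);        // "seek" is just moving the buffer position
            byte[] header = new byte[4];
            buf.get(header);        // read the first four bytes (the codec magic)
            System.out.printf("first bytes: %02x %02x %02x %02x%n",
                    header[0], header[1], header[2], header[3]);
        }
    }
}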

For example, reading a string field:

private static void readField(DataInput in, StoredFieldVisitor visitor, FieldInfo info, int bits)
    throws IOException {
  switch (bits & TYPE_MASK) {
    case BYTE_ARR:
      int length = in.readVInt();
      byte[] data = new byte[length];
      in.readBytes(data, 0, length);
      visitor.binaryField(info, data);
      break;
    case STRING:
      visitor.stringField(info, in.readString());
      break;
    case NUMERIC_INT:
      visitor.intField(info, in.readZInt());
      break;
    case NUMERIC_FLOAT:
      visitor.floatField(info, readZFloat(in));
      break;
    case NUMERIC_LONG:
      visitor.longField(info, readTLong(in));
      break;
    case NUMERIC_DOUBLE:
      visitor.doubleField(info, readZDouble(in));
      break;
    default:
      throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
  }
}
main[1] where
[1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
[3] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[4] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[5] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[6] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
main[1]

main[1] list
66 }
67
68 @Override
69 public void stringField(FieldInfo fieldInfo, String value) throws IOException {
70 => final FieldType ft = new FieldType(TextField.TYPE_STORED);
71 ft.setStoreTermVectors(fieldInfo.hasVectors());
72 ft.setOmitNorms(fieldInfo.omitsNorms());
73 ft.setIndexOptions(fieldInfo.getIndexOptions());
74 doc.add(
75 new StoredField(
main[1] print value
value = "/home/dai/docs/aaa.txt"
main[1] where
[1] org.apache.lucene.document.DocumentStoredFieldVisitor.stringField (DocumentStoredFieldVisitor.java:70)
[2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
[3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
[4] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
[5] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
[6] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
[7] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
[8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
[9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The string that was read is then loaded into the doc object.

This is the core routine: the file is read via mmap, seek moves to the computed offset, the stored bytes for the given length are read out, and the result is turned into an object.


/**
 * Get the serialized representation of the given docID. This docID has to be contained in the
 * current block.
 */
SerializedDocument document(int docID) throws IOException {
  if (contains(docID) == false) {
    throw new IllegalArgumentException();
  }

  final int index = docID - docBase;
  final int offset = Math.toIntExact(offsets[index]);
  final int length = Math.toIntExact(offsets[index + 1]) - offset;
  final int totalLength = Math.toIntExact(offsets[chunkDocs]);
  final int numStoredFields = Math.toIntExact(this.numStoredFields[index]);

  final BytesRef bytes;
  if (merging) {
    bytes = this.bytes;
  } else {
    bytes = new BytesRef();
  }
  ...
    fieldsStream.seek(startPointer); // seek to the computed position
    decompressor.decompress(fieldsStream, totalLength, offset, length, bytes); // decompress the stored bytes
    assert bytes.length == length;
    documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length); // wrap the bytes so the document can be read from them
  }

  return new SerializedDocument(documentInput, length, numStoredFields);
}
}
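The offset arithmetic at the top of document() is easy to check with a toy example; the numbers below are invented, whereas in the real reader offsets comes from the decoded chunk metadata.

## sketch: ChunkOffsetDemo.java (worked example of the offsets arithmetic)
public class ChunkOffsetDemo {
    public static void main(String[] args) {
        long[] offsets = {0, 38, 70, 121}; // invented: one entry per doc in the chunk, plus the end
        int docBase = 4;                   // first docID stored in this chunk
        int docID = 5;                     // the doc we want

        int index = docID - docBase;                               // 1
        int offset = Math.toIntExact(offsets[index]);              // 38
        int length = Math.toIntExact(offsets[index + 1]) - offset; // 70 - 38 = 32
        // The document's serialized bytes are the 32-byte slice starting 38 bytes
        // into the decompressed chunk.
        System.out.println("slice = [" + offset + ", " + (offset + length) + ")");
    }
}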

Fetching the doc

Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance(), line=498 bci=0
498 if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {

main[1] where
[1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance (Lucene90PostingsReader.java:498)
[2] org.apache.lucene.index.SlowImpactsEnum.advance (SlowImpactsEnum.java:77)
[3] org.apache.lucene.search.ImpactsDISI.advance (ImpactsDISI.java:128)
[4] org.apache.lucene.search.ImpactsDISI.nextDoc (ImpactsDISI.java:133)
[5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:301)
[6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Term query and iteration

Note that ImpactsEnum implements an iterator over docids.

1,138      }
1,139
1,140 @Override
1,141 public ImpactsEnum impacts(int flags) throws IOException {
1,142 => assert !eof;
1,143 // if (DEBUG) {
1,144 // System.out.println("BTTR.docs seg=" + segment);
1,145 // }
1,146 currentFrame.decodeMetaData();
1,147 // if (DEBUG) {
main[1] where
[1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.impacts (SegmentTermsEnum.java:1,142)
[2] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:114)
[3] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
[4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
[5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Note that PostingsEnum also exposes a docid iterator.

Top-k ranking


main[1] where
[1] org.apache.lucene.util.PriorityQueue.upHeap (PriorityQueue.java:276)
[2] org.apache.lucene.util.PriorityQueue.add (PriorityQueue.java:161)
[3] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:303)
[4] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
[5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
[6] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
[7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
[8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[9] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[11] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


  @Override
  public boolean lessThan(ShardRef first, ShardRef second) {
    assert first != second;
    ScoreDoc firstScoreDoc = shardHits[first.shardIndex][first.hitIndex];
    ScoreDoc secondScoreDoc = shardHits[second.shardIndex][second.hitIndex];
    if (firstScoreDoc.score < secondScoreDoc.score) {
      return false;
    } else if (firstScoreDoc.score > secondScoreDoc.score) {
      return true;
    } else {
      return tieBreakLessThan(first, firstScoreDoc, second, secondScoreDoc, tieBreakerComparator);
    }
  }

Related reading

diff --git a/2022/07/06/clickhouse-parser/index.html b/2022/07/06/clickhouse-parser/index.html

clickhouse parser

clickhouse stack trace

(lldb) bt
* thread #3, name = 'HTTPHandler', stop reason = breakpoint 1.1
* frame #0: 0x000000001d9c6522 clickhouse-server`DB::tryParseQuery(parser=0x00007fff2d3ef620, _out_query_end=0x00007fff2d3ecc60, all_queries_end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", out_error_message="", hilite=false, query_description="", allow_multi_statements=false, max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:237:32
frame #1: 0x000000001d9c77bd clickhouse-server`DB::parseQueryAndMovePosition(parser=0x00007fff2d3ef620, pos=0x00007fff2d3ecc60, end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", query_description="", allow_multi_statements=false, max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:343:18
frame #2: 0x000000001d9c7926 clickhouse-server`DB::parseQuery(parser=0x00007fff2d3ef620, begin="show databases\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", query_description="", max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:360:12
frame #3: 0x000000001b95ec13 clickhouse-server`DB::executeQueryImpl(begin="show databases\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007ffff5b23000 strong=4 weak=5, internal=false, stage=Complete, istr=0x00007fff2403d000) at executeQuery.cpp:442:15
frame #4: 0x000000001b965181 clickhouse-server`DB::executeQuery(istr=0x00007fff2403d000, ostr=0x00007fff240393d8, allow_into_outfile=false, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007ffff5b23000 strong=4 weak=5, set_result_details=DB::SetResultDetailsFunc @ 0x00007fff2d3f0bf0, output_format_settings= Has Value=false )>, std::__1::optional<DB::FormatSettings> const&) at executeQuery.cpp:1136:30
frame #5: 0x000000001d3b7b17 clickhouse-server`DB::HTTPHandler::processQuery(this=0x00007ffff7154d40, request=0x00007fff2d3f2438, params=0x00007fff2d3f1e10, response=0x00007fff2d3f24f0, used_output=0x00007fff2d3f1ec8, query_scope= Has Value=true ) at HTTPHandler.cpp:822:5
frame #6: 0x000000001d3ba4fe clickhouse-server`DB::HTTPHandler::handleRequest(this=0x00007ffff7154d40, request=0x00007fff2d3f2438, response=0x00007fff2d3f24f0) at HTTPHandler.cpp:960:9
frame #7: 0x000000001d3f1e17 clickhouse-server`DB::HTTPServerConnection::run(this=0x00007ffff5b18000) at HTTPServerConnection.cpp:65:34
frame #8: 0x000000002308e1d9 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007ffff5b18000) at TCPServerConnection.cpp:43:3
frame #9: 0x000000002308e9e6 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff12875500) at TCPServerDispatcher.cpp:115:20
frame #10: 0x00000000232cecf4 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702df80) at ThreadPool.cpp:199:14
frame #11: 0x00000000232cb81a clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001330) at Thread.cpp:55:11
frame #12: 0x00000000232ca5fe clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702dfb8) at Thread_POSIX.cpp:345:27
frame #13: 0x00007ffff7df8b43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
frame #14: 0x00007ffff7e8aa00 libc.so.6`__clone3 at clone3.S:81

diff --git a/2022/08/16/paper/index.html b/2022/08/16/paper/index.html

paper

Background

A collection of classic papers on related topics.

Databases

mvcc

A highly recommended PDF on database transactions.

Execution engines

Efficient Query Evaluation using a Two-Level Retrieval Process

topk min/max heap

parser

lr(k)

gc

Mostly Concurrent Garbage Collection
Garbage-First Garbage Collection
A Fast Write Barrier for Generational Garbage Collectors
Incremental Collection of Mature Objects*

database

The Vertica Analytic Database: C-Store 7 Years Later

diff --git "a/2022/08/19/lucene-tim\346\240\274\345\274\217/index.html" "b/2022/08/19/lucene-tim\346\240\274\345\274\217/index.html"

lucene tim格式

Background

The tim file is where lucene stores term-related statistics. The tip file is closely related to it.

Format and example

File format (the corresponding documentation can be found in the related reading at the bottom):

TermsDict (.tim) --> Header, PostingsHeader, NodeBlockNumBlocks, Footer
NodeBlock --> (OuterNode | InnerNode)
OuterNode --> EntryCount, SuffixLength, ByteSuffixLength, StatsLength, < TermStats >EntryCount, MetaLength, <TermMetadata>EntryCount
InnerNode --> EntryCount, SuffixLength[,Sub?], ByteSuffixLength, StatsLength, < TermStats ? >EntryCount, MetaLength, <TermMetadata ? >EntryCount
TermStats --> DocFreq, TotalTermFreq
Header --> CodecHeader
EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength --> VInt
TotalTermFreq --> VLong
Footer --> CodecFooter
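
The EntryCount/SuffixLength/DocFreq/MetaLength fields above are VInts, Lucene's variable-length integer encoding. A minimal decoding sketch (a standalone helper for illustration, not Lucene's own API), assuming the usual 7-payload-bits-per-byte scheme where a set high bit means another byte follows:

static int readVInt(byte[] buf, int offset) {
    int value = 0;
    int shift = 0;
    byte b;
    do {
        b = buf[offset++];
        value |= (b & 0x7F) << shift;  // take the 7 payload bits, low-order group first
        shift += 7;
    } while ((b & 0x80) != 0);         // high bit set -> another byte follows
    return value;
}

For small values (as in the dump below) a VInt is a single byte equal to the value itself.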

Example

hexdump -C  _j_Lucene90_0.tim 

00000000 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
00000010 72 6d 73 44 69 63 74 00 00 00 00 fe ea 80 e6 45 |rmsDict........E|
00000020 20 d8 56 64 1b 1b 1b 89 70 fe 67 0a 4c 75 63 65 | .Vd....p.g.Luce|
00000030 6e 65 39 30 5f 30 25 bc 03 61 6d 61 6e 64 62 75 |ne90_0%..amandbu|
00000040 74 63 61 6e 64 6f 68 65 6c 6c 6f 68 69 69 69 73 |tcandohellohiiis|
00000050 69 74 6b 6e 6f 77 6d 61 79 6d 6f 6e 67 6f 6e 6f |itknowmaymongono|
00000060 74 74 72 79 77 68 61 74 77 6f 72 6c 64 79 6f 75 |ttrywhatworldyou|
00000070 24 02 03 03 03 02 05 02 01 02 02 04 03 05 03 03 |$...............|
00000080 04 05 03 10 04 00 09 02 01 04 00 03 02 01 01 02 |................|
00000090 01 07 02 02 26 7a 3d 04 01 02 03 01 01 01 01 01 |....&z=.........| <--- the 6th byte in this row, i.e. the one starting with 7a
000000a0 05 01 01 01 00 02 04 00 02 01 01 01 01 01 02 01 |................|
000000b0 01 01 02 01 01 01 01 05 01 03 01 05 a4 03 2f 68 |............../h|
000000c0 6f 6d 65 2f 75 62 75 6e 74 75 2f 64 6f 63 2f 68 |ome/ubuntu/doc/h|
000000d0 65 6c 6c 6f 2e 74 78 74 2f 68 6f 6d 65 2f 75 62 |ello.txt/home/ub|
000000e0 75 6e 74 75 2f 64 6f 63 2f 6d 6f 6e 67 6f 2e 74 |untu/doc/mongo.t|
000000f0 78 74 05 1a 01 03 04 82 01 01 03 c0 28 93 e8 00 |xt..........(...|
00000100 00 00 00 00 00 00 00 da 02 a3 a3 |...........|
Here ste.in is the tim file's data:

main[2] list
472 }
473 }
474
475 // metadata
476 => ste.fr.parent.postingsReader.decodeTerm(bytesReader, ste.fr.fieldInfo, state, absolute);
477
478 metaDataUpto++;
479 absolute = false;
480 }
481 state.termBlockOrd = metaDataUpto;
main[2] print ste.in
ste.in = "MMapIndexInput(path="/home/ubuntu/index/_j_Lucene90_0.tim")"

The bytes it corresponds to are:

main[2] dump bytesReader.bytes
bytesReader.bytes = {
122, 61, 4, 1, 2, 3, 1, 1, 1, 1, 1, 5, 1, 1, 1, 0, 2, 4, 0, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 5, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
}


Related reading

Format documentation: https://lucene.apache.org/core/9_0_0/core/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsWriter.html
tim format: https://www.jianshu.com/p/b05eed0da6ad
diff --git "a/2022/08/23/java-static-\345\235\227/index.html" "b/2022/08/23/java-static-\345\235\227/index.html"

java static 块

Background

Notes on some simple pieces of Java syntax.

static block

From the JLS:
8.7 Static Initializers
A static initializer declared in a class is executed when the class is initialized
(§12.4.2). Together with any field initializers for class variables (§8.3.2), static
initializers may be used to initialize the class variables of the class.

The static block runs when the class is initialized, before any instance is created.
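
A minimal sketch (the class name StaticInitDemo is just an illustrative choice) showing that order: the static block runs exactly once at class initialization, before the first constructor call.

public class StaticInitDemo {
    static { System.out.println("static initializer"); }   // runs once, at class initialization

    StaticInitDemo() { System.out.println("constructor"); }

    public static void main(String[] args) {
        new StaticInitDemo();   // prints: static initializer, then constructor
        new StaticInitDemo();   // the static block does not run again
    }
}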

diff --git a/2022/08/26/3-Method-Reference-Expressions/index.html b/2022/08/26/3-Method-Reference-Expressions/index.html

Method Reference Expressions

Background

Getting to know Java method references.

Introduction

Method references are covered in the JLS, section 15.13 "Method Reference Expressions".

A Java method reference is an expression; evaluating it produces an object. The result of a method reference is an instance of a functional interface type:
Evaluation of a method reference expression produces an instance of a functional
interface type
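
A minimal sketch of that statement: evaluating the method reference String::length produces an instance of the functional interface type Function<String, Integer>.

import java.util.function.Function;

public class MethodRefDemo {
    public static void main(String[] args) {
        // Evaluating the method reference yields an instance of the
        // functional interface type Function<String, Integer>.
        Function<String, Integer> len = String::length;
        System.out.println(len.apply("hello"));   // 5
    }
}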
diff --git a/2022/09/05/max-min-heap/index.html b/2022/09/05/max-min-heap/index.html

priority queue

Background

After Lucene scores the hits with the scorer, a top-K step still has to pick the first few results, so a top-K algorithm is needed. It is usually implemented with a priority queue.

Introduction

Everything below describes a max-priority queue.

Priority queues come in two flavors, min-priority and max-priority; the only difference is the ordering direction.

Let's first look at its properties:

Composition: a priority queue is a set S of items. Each item carries two things: an element and a key.

Operations (a small sketch follows this list):

  • insert(S, item)
  • maximum(S)
  • extract_max(S)
  • increase_key(S, element, key) — increase the key of an element already in the queue
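
A minimal array-backed max-heap sketch of these operations (illustrative code, not Lucene's PriorityQueue), using the 1-based index relations proved below; insert, maximum and extract_max are shown, and increase_key would simply be a sift-up from the changed slot.

public class MaxHeap {
    private final int[] a = new int[1024];  // keys only; element payload omitted
    private int size = 0;                   // slot 0 unused so that left = 2*i, right = 2*i + 1, parent = i / 2

    public void insert(int key) {
        a[++size] = key;
        int i = size;
        while (i > 1 && a[i / 2] < a[i]) {   // sift up while the parent is smaller
            int t = a[i]; a[i] = a[i / 2]; a[i / 2] = t;
            i /= 2;
        }
    }

    public int maximum() { return a[1]; }

    public int extractMax() {
        int max = a[1];
        a[1] = a[size--];
        int i = 1;
        while (2 * i <= size) {              // sift down toward the larger child
            int child = 2 * i;
            if (child + 1 <= size && a[child + 1] > a[child]) child++;
            if (a[i] >= a[child]) break;
            int t = a[i]; a[i] = a[child]; a[child] = t;
            i = child;
        }
        return max;
    }
}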

Proof

For a non-empty complete binary tree whose nodes are numbered level by level starting from index = 1, each node index satisfies:

  • its left child is left = index * 2
  • its right child is right = index * 2 + 1

Proof by induction.

Base case:
for index = 1, left = 2, which satisfies left = index * 2;
for index = 1, right = 3, which satisfies right = index * 2 + 1.

Inductive step: consider node n + 1.
If it is a left child, the previous node n is the right child of the previous parent, i.e. n = pre_parent * 2 + 1, so n + 1 = (pre_parent * 2 + 1) + 1 = (pre_parent + 1) * 2, and pre_parent + 1 is exactly the parent of n + 1, so the formula holds.
If it is a right child, then n is the left child of the same parent, i.e. n = parent * 2, so n + 1 = parent * 2 + 1, and the formula holds as well.

This completes the proof.

Related papers

Introduction to Algorithms (CLRS)

diff --git "a/2022/09/06/httpServerletRequest-autowired-\345\216\237\345\233\240/index.html" "b/2022/09/06/httpServerletRequest-autowired-\345\216\237\345\233\240/index.html"

httpServerletRequest autowired 原因

Background

Beans injected with autowired are singletons by default, so how does HttpServletRequest manage not to behave like one?

Stack trace:

getRequestAttributes:105, RequestContextHolder (org.springframework.web.context.request)
processRequest:997, FrameworkServlet (org.springframework.web.servlet)
doPost:909, FrameworkServlet (org.springframework.web.servlet)
service:652, HttpServlet (javax.servlet.http)
service:883, FrameworkServlet (org.springframework.web.servlet)
service:733, HttpServlet (javax.servlet.http)
internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
invoke:202, StandardWrapperValve (org.apache.catalina.core)
invoke:97, StandardContextValve (org.apache.catalina.core)
invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
invoke:143, StandardHostValve (org.apache.catalina.core)
invoke:92, ErrorReportValve (org.apache.catalina.valves)
invoke:78, StandardEngineValve (org.apache.catalina.core)
service:357, CoyoteAdapter (org.apache.catalina.connector)
service:374, Http11Processor (org.apache.coyote.http11)
process:65, AbstractProcessorLight (org.apache.coyote)
process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
run:49, SocketProcessorBase (org.apache.tomcat.util.net)
runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
run:834, Thread (java.lang)
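
The stack above ends in RequestContextHolder: the request object that gets injected behaves like a proxy that resolves the real request bound to the current thread on every call. A minimal sketch of that lookup (an illustrative helper, not Spring's internal code):

import javax.servlet.http.HttpServletRequest;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

// Illustrative helper: resolve the HttpServletRequest bound to the current
// thread, which is effectively what the injected proxy does per invocation.
public final class CurrentRequest {
    public static HttpServletRequest get() {
        ServletRequestAttributes attrs =
                (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes();
        return attrs.getRequest();
    }
}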
Related reading

diff --git "a/2022/09/11/java-thread-local-\345\210\235\345\247\213\345\214\226\346\227\266\346\234\272/index.html" "b/2022/09/11/java-thread-local-\345\210\235\345\247\213\345\214\226\346\227\266\346\234\272/index.html"

java thread local 初始化时机

Background

Quite a few places in Spring Boot rely on Java's thread-local map to isolate state per thread. This post looks at how the JDK implements that.

Implementation

The core idea is that the threadlocal map is set up as part of the thread object's life cycle: each thread holds its own map, which is what gives thread isolation.

The relevant JDK file is:
src/java.base/share/classes/java/lang/Thread.java

/**
* Initializes a virtual Thread.
*
* @param name thread name, can be null
* @param characteristics thread characteristics
* @param bound true when bound to an OS thread
*/
Thread(String name, int characteristics, boolean bound) {
...
this.inheritableThreadLocals = ThreadLocal.createInheritedMap(parentMap);
...
}

ThreadLocal initialization

ThreadLocal has several entry points, but they all end up initializing the ThreadLocalMap through createMap; the map is held by the Thread instance.

The core field is threadLocals: each thread holds its own map, and each entry of that map is a weak reference:

/* ThreadLocal values pertaining to this thread. This map is maintained
* by the ThreadLocal class. */
ThreadLocal.ThreadLocalMap threadLocals = null;
/**
* Sets the current thread's copy of this thread-local variable
* to the specified value. Most subclasses will have no need to
* override this method, relying solely on the {@link #initialValue}
* method to set the values of thread-locals.
*
* @param value the value to be stored in the current thread's copy of
* this thread-local.
*/
public void set(T value) {
Thread t = Thread.currentThread();
ThreadLocalMap map = getMap(t);
if (map != null) {
map.set(this, value);
} else {
createMap(t, value); // first use on this thread: create its ThreadLocalMap
}
}
/**
* Variant of set() to establish initialValue. Used instead
* of set() in case user has overridden the set() method.
*
* @return the initial value
*/
private T setInitialValue() {
T value = initialValue();
Thread t = Thread.currentThread();
ThreadLocalMap map = getMap(t);
if (map != null) {
map.set(this, value);
} else {
createMap(t, value); // first use on this thread: create its ThreadLocalMap
}
if (this instanceof TerminatingThreadLocal) {
TerminatingThreadLocal.register((TerminatingThreadLocal<?>) this);
}
return value;
}

Each Thread holds its own threadLocals object:

/**
* Get the map associated with a ThreadLocal. Overridden in
* InheritableThreadLocal.
*
* @param t the current thread
* @return the map
*/
ThreadLocalMap getMap(Thread t) {
return t.threadLocals;
}
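
A small usage sketch tying this together: two threads set the same ThreadLocal but each sees only its own value, because each Thread carries its own ThreadLocalMap.

public class ThreadLocalDemo {
    private static final ThreadLocal<String> NAME = new ThreadLocal<>();

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
            // set() stores the value in the *current* thread's ThreadLocalMap
            NAME.set("value of " + Thread.currentThread().getName());
            System.out.println(Thread.currentThread().getName() + " sees " + NAME.get());
        };
        Thread a = new Thread(task, "thread-a");
        Thread b = new Thread(task, "thread-b");
        a.start(); b.start();
        a.join(); b.join();
        System.out.println("main sees " + NAME.get());  // null: main never called set()
    }
}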
Related reading

diff --git "a/2022/09/20/jdk\347\274\226\350\257\221/index.html" "b/2022/09/20/jdk\347\274\226\350\257\221/index.html"

jdk编译

Step 1

Pull the code:

git clone https://github.com/openjdk/jdk.git  
Step 2

Local version: my local JDK is 17.

java -version
openjdk version "17.0.4" 2022-07-19
The JDK can only be bootstrapped with the previous (or same) major version as the boot JDK, so we check out the next major version: my local JDK is 17, so I switch to the jdk 18 tag:

## switch to jdk18
git checkout jdk-18+37

## generate the makefiles

./configure --with-debug-level=slowdebug --with-extra-cflags="-Wno-nonnull -Wno-maybe-uninitialized -Wno-free-nonheap-object"
## build with 2 parallel jobs

make JOBS=2 CONF=linux-x86_64-server-slowdebug
Common errors

configure: error: Could not find all X11 headers (shape.h Xrender.h Xrandr.h XTest.h Intrinsic.h). You might be able to fix this by running 'sudo apt-get install libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev'.
configure exiting with result code 1
Fix:

sudo apt-get install libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev
configure: error: Could not find cups! You might be able to fix this by running 'sudo apt-get install libcups2-dev'. 
configure exiting with result code 1
Fix:

sudo apt-get install libcups2-dev

Related reading

diff --git "a/2022/09/21/java-\345\240\206\346\240\210/index.html" "b/2022/09/21/java-\345\240\206\346\240\210/index.html"

jdk 堆栈

Background

I wanted to see how the JDK's implementation differs from PHP's.

Stack trace

The JDK translates opcodes into assembly; the call stack that generates the assembly looks like this:

(gdb) where
#0 AbstractAssembler::emit_int8 (this=0x7ffff00198a0, x=-64 '\300') at /home/ubuntu/jdk/src/hotspot/share/asm/assembler.hpp:286
#1 0x00007ffff62513e9 in Assembler::emit_arith (this=0x7ffff00198a0, op1=133, op2=192, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/assembler_x86.cpp:300
#2 0x00007ffff6284cf3 in Assembler::testq (this=0x7ffff00198a0, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/assembler_x86.cpp:9191
#3 0x00007ffff6bcd75b in MacroAssembler::testptr (this=0x7ffff00198a0, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/macroAssembler_x86.cpp:4072
#4 0x00007ffff6f5b67f in StubGenerator::generate_forward_exception (this=0x7ffff5b68890) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:537
#5 0x00007ffff6f7dd2a in StubGenerator::generate_initial (this=0x7ffff5b68890) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:5752
#6 0x00007ffff6f7e9db in StubGenerator::StubGenerator (this=0x7ffff5b68890, code=0x7ffff5b68940, all=false)
at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:5994
#7 0x00007ffff6f589d3 in StubGenerator_generate (code=0x7ffff5b68940, all=false) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:6000
#8 0x00007ffff6f7eae9 in StubRoutines::initialize1 () at /home/ubuntu/jdk/src/hotspot/share/runtime/stubRoutines.cpp:195
#9 0x00007ffff6f7fb77 in stubRoutines_init1 () at /home/ubuntu/jdk/src/hotspot/share/runtime/stubRoutines.cpp:374
#10 0x00007ffff686a610 in init_globals () at /home/ubuntu/jdk/src/hotspot/share/runtime/init.cpp:112
#11 0x00007ffff700e2fd in Threads::create_vm (args=0x7ffff5b68e20, canTryAgain=0x7ffff5b68d2b) at /home/ubuntu/jdk/src/hotspot/share/runtime/thread.cpp:3729
#12 0x00007ffff697a82d in JNI_CreateJavaVM_inner (vm=0x7ffff5b68e78, penv=0x7ffff5b68e80, args=0x7ffff5b68e20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:3935
#13 0x00007ffff697ab47 in JNI_CreateJavaVM (vm=0x7ffff5b68e78, penv=0x7ffff5b68e80, args=0x7ffff5b68e20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:4021
#14 0x00007ffff7fba8a2 in InitializeJVM (pvm=0x7ffff5b68e78, penv=0x7ffff5b68e80, ifn=0x7ffff5b68ed0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:1529
#15 0x00007ffff7fb7453 in JavaMain (_args=0x7ffffffface0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:414
#16 0x00007ffff7d79609 in start_thread (arg=<optimized out>) at pthread_create.c:477
#17 0x00007ffff7ed5163 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
diff --git a/2022/10/24/cpp-flag/index.html b/2022/10/24/cpp-flag/index.html

cpp_flag

Background

When building C++ you need to understand what happens during compilation.

Adding C++ flags with cmake

When we compile a program with g++ we often get warnings or errors, for example:

error: unused variable 'productSize' [-Werror,-Wunused-variable]
You can see -Werror,-Wunused-variable: these options turned the issue into an error, and the underlying problem is simply a variable that was defined but never used.

If we want to silence it we can add -Wno-unused-variable; in general, take the -Wxxx reported in the error and change it to -Wno-xxx.

We can add the corresponding compile flags with add_compile_options:

add_compile_options(-Wall -Wextra -pedantic -Werror  -Wno-unused-variable)
diff --git a/2022/11/09/zookeeper-connetion-loss/index.html b/2022/11/09/zookeeper-connetion-loss/index.html

zookeeper connetion loss

Background

returned error: Code: 999, e.displayText() = DB::Exception: Cannot allocate block number in ZooKeeper: Coordination::Exception: Connection loss, path: xxx

Scenario

Recently ClickHouse has occasionally been losing its ZooKeeper connection.

diff --git "a/2022/11/16/jdk-\345\217\215\346\261\207\347\274\226/index.html" "b/2022/11/16/jdk-\345\217\215\346\261\207\347\274\226/index.html"

jdk 反汇编

Background

In many cases the java command itself can print disassembly.

Relevant options

These are HotSpot options (with an -XX: prefix on the command line).

The key flag is -XX:+PrintAssembly, which makes the JVM print the disassembled code:

./java   -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly -XX:CompileCommand=compileonly,*com.Hello::testIncr  com.Hello  >> test.txt
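
For reference, a class along these lines would match the volatile-increment assembly shown further down; the actual com.Hello source is not included in the post, so this is a guessed reconstruction:

package com;

public class Hello {
    static volatile int i;     // the OptoAssembly below shows "Field: volatile com/Hello.i"

    static void testIncr() {
        i++;                   // compiles to a volatile load, incl, store plus a memory barrier
    }

    public static void main(String[] args) {
        for (int n = 0; n < 100_000; n++) {
            testIncr();        // keep the method busy so the compiled code is produced
        }
    }
}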

Parsing the flag

Thread 2 "java" hit Hardware watchpoint 19: PrintAssembly

Old value = false
New value = true
JVMFlag::write<bool> (this=0x7f1814bcf140 <flagTable+17600>, value=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlag.hpp:237
237 }
(gdb) bt
#0 JVMFlag::write<bool> (this=0x7f1814bcf140 <flagTable+17600>, value=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlag.hpp:237
#1 0x00007f18137cf984 in TypedFlagAccessImpl<bool, EventBooleanFlagChanged>::check_constraint_and_set (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value_addr=0x7f18129e7f54,
origin=JVMFlagOrigin::COMMAND_LINE, verbose=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:75
#2 0x00007f18137ce655 in FlagAccessImpl_bool::set_impl (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value_addr=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE)
at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:94
#3 0x00007f18137ce576 in FlagAccessImpl::set (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE)
at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:49
#4 0x00007f18137cc5db in JVMFlagAccess::set_impl (flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:299
#5 0x00007f181306cdf9 in JVMFlagAccess::set<bool, 0> (flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp:120
#6 0x00007f181306bd96 in JVMFlagAccess::set_bool (f=0x7f1814bcf140 <flagTable+17600>, v=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp:133
#7 0x00007f1813060002 in set_bool_flag (flag=0x7f1814bcf140 <flagTable+17600>, value=true, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:825
#8 0x00007f18130607a9 in Arguments::parse_argument (arg=0x55dcf2d093a4 "+PrintAssembly", origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:993
#9 0x00007f18130611be in Arguments::process_argument (arg=0x55dcf2d093a4 "+PrintAssembly", ignore_unrecognized=0 '\000', origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:1179
#10 0x00007f1813066867 in Arguments::parse_each_vm_init_arg (args=0x7f18129e8d50, patch_mod_javabase=0x7f18129e87fb, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:2972
#11 0x00007f18130639da in Arguments::parse_vm_init_args (vm_options_args=0x7f18129e8878, java_tool_options_args=0x7f18129e88b8, java_options_args=0x7f18129e88f8, cmd_line_args=0x7f18129e8d50)
at /var/jdk/src/hotspot/share/runtime/arguments.cpp:2174
#12 0x00007f1813068a46 in Arguments::parse (initial_cmd_args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:3946
#13 0x00007f1813e5def7 in Threads::create_vm (args=0x7f18129e8d50, canTryAgain=0x7f18129e8c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2734
#14 0x00007f181378343b in JNI_CreateJavaVM_inner (vm=0x7f18129e8da8, penv=0x7f18129e8db0, args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
#15 0x00007f1813783787 in JNI_CreateJavaVM (vm=0x7f18129e8da8, penv=0x7f18129e8db0, args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
#16 0x00007f1814efaa6a in InitializeJVM (pvm=0x7f18129e8da8, penv=0x7f18129e8db0, ifn=0x7f18129e8e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
#17 0x00007f1814ef75ec in JavaMain (_args=0x7ffc68186870) at /var/jdk/src/java.base/share/native/libjli/java.c:411
#18 0x00007f1814efe5ec in ThreadJavaMain (args=0x7ffc68186870) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
#19 0x00007f1814d59b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
Stack trace:

#0  Compile::Compile (this=0x7f5eb0b5ea80, ci_env=0x7f5eb0b5f6b0, generator=0x7f5ecfee55f2 <OptoRuntime::new_instance_Type()>, 
stub_function=0x7f5ecfee3d90 <OptoRuntime::new_instance_C(Klass*, JavaThread*)> "\363\017\036\372UH\211\345H\203\354`H\211}\250H\211u\240\350[2$\377H9E\240\017\225\300\204\300t?H\215\005m\241\364",
stub_name=0x7f5ed074e78a "_new_instance_Java", is_fancy_jump=0, pass_tls=true, return_pc=false, directive=0x7f5ec822a050) at /var/jdk/src/hotspot/share/opto/compile.cpp:892
#1 0x00007f5ecfee3c98 in OptoRuntime::generate_stub (env=0x7f5eb0b5f6b0, gen=0x7f5ecfee55f2 <OptoRuntime::new_instance_Type()>,
C_function=0x7f5ecfee3d90 <OptoRuntime::new_instance_C(Klass*, JavaThread*)> "\363\017\036\372UH\211\345H\203\354`H\211}\250H\211u\240\350[2$\377H9E\240\017\225\300\204\300t?H\215\005m\241\364", name=0x7f5ed074e78a "_new_instance_Java",
is_fancy_jump=0, pass_tls=true, return_pc=false) at /var/jdk/src/hotspot/share/opto/runtime.cpp:171
#2 0x00007f5ecfee374d in OptoRuntime::generate (env=0x7f5eb0b5f6b0) at /var/jdk/src/hotspot/share/opto/runtime.cpp:139
#3 0x00007f5ecf48ab83 in C2Compiler::init_c2_runtime () at /var/jdk/src/hotspot/share/opto/c2compiler.cpp:78
#4 0x00007f5ecf48ac07 in C2Compiler::initialize (this=0x7f5ec8342980) at /var/jdk/src/hotspot/share/opto/c2compiler.cpp:91
#5 0x00007f5ecf5c2ab2 in CompileBroker::init_compiler_runtime () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1782
#6 0x00007f5ecf5c3046 in CompileBroker::compiler_thread_loop () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1919
#7 0x00007f5ecf5e5462 in CompilerThread::thread_entry (thread=0x7f5ec8343060, __the_thread__=0x7f5ec8343060) at /var/jdk/src/hotspot/share/compiler/compilerThread.cpp:59
#8 0x00007f5ed00c0009 in JavaThread::thread_main_inner (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1297
#9 0x00007f5ed00bfe92 in JavaThread::run (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1280
#10 0x00007f5ed00bd57f in Thread::call_run (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:358
#11 0x00007f5ecfe041e7 in thread_native_entry (thread=0x7f5ec8343060) at /var/jdk/src/hotspot/os/linux/os_linux.cpp:705
#12 0x00007f5ed0fc0b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#13 0x00007f5ed1051bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
(gdb) b Compile::Compile
(gdb) bt
#0 nmethod::print (this=0x7fd21d591010, st=0x7fd22c000b80) at /var/jdk/src/hotspot/share/code/nmethod.cpp:2518
#1 0x00007fd23498cf10 in nmethod::decode2 (this=0x7fd21d591010, ost=0x7fd22c000b80) at /var/jdk/src/hotspot/share/code/nmethod.cpp:2887
#2 0x00007fd234985e16 in nmethod::print_nmethod (this=0x7fd21d591010, printmethod=true) at /var/jdk/src/hotspot/share/code/nmethod.cpp:962
#3 0x00007fd234985c95 in nmethod::maybe_print_nmethod (this=0x7fd21d591010, directive=0x7fd22c229f20) at /var/jdk/src/hotspot/share/code/nmethod.cpp:935
#4 0x00007fd2341a2a9e in CompileBroker::invoke_compiler_on_method (task=0x7fd22c359c10) at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:2345
#5 0x00007fd2341a12c1 in CompileBroker::compiler_thread_loop () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1966
#6 0x00007fd2341c3462 in CompilerThread::thread_entry (thread=0x7fd22c344ac0, __the_thread__=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/compiler/compilerThread.cpp:59
#7 0x00007fd234c9e009 in JavaThread::thread_main_inner (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1297
#8 0x00007fd234c9de92 in JavaThread::run (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1280
#9 0x00007fd234c9b57f in Thread::call_run (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:358
#10 0x00007fd2349e21e7 in thread_native_entry (thread=0x7fd22c344ac0) at /var/jdk/src/hotspot/os/linux/os_linux.cpp:705
#11 0x00007fd235b9eb43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#12 0x00007fd235c2fbb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
Decoding the instructions:

(gdb) bt
#0 DirectivesStack::getMatchingDirective (method=..., comp=0x7f4b2c2bef40) at /var/jdk/src/hotspot/share/compiler/compilerDirectives.cpp:670
#1 0x00007f4b34793667 in CompileBroker::compile_method (method=..., osr_bci=-1, comp_level=3, hot_method=..., hot_count=0, compile_reason=CompileTask::Reason_MustBeCompiled, __the_thread__=0x7f4b2c02a5c0)
at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1349
#2 0x00007f4b34770655 in CompilationPolicy::compile_if_required (m=..., __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/compiler/compilationPolicy.cpp:110
#3 0x00007f4b34ae8f97 in JavaCalls::call_helper (result=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/javaCalls.cpp:359
#4 0x00007f4b34fe0344 in os::os_exception_wrapper (f=0x7f4b34ae8ccc <JavaCalls::call_helper(JavaValue*, methodHandle const&, JavaCallArguments*, JavaThread*)>, value=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, thread=0x7f4b2c02a5c0)
at /var/jdk/src/hotspot/os/linux/os_linux.cpp:4794
#5 0x00007f4b34ae8cc9 in JavaCalls::call (result=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/javaCalls.cpp:330
#6 0x00007f4b34ab6626 in InstanceKlass::call_class_initializer (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1519
#7 0x00007f4b34ab50aa in InstanceKlass::initialize_impl (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1177
#8 0x00007f4b34ab3adc in InstanceKlass::initialize (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:796
#9 0x00007f4b352905de in initialize_class (class_name=0x7f4b3114d470, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:689
#10 0x00007f4b35296d82 in Threads::initialize_jsr292_core_classes (__the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2687
#11 0x00007f4b352975e0 in Threads::create_vm (args=0x7f4b33e21d50, canTryAgain=0x7f4b33e21c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2987
#12 0x00007f4b34bbc43b in JNI_CreateJavaVM_inner (vm=0x7f4b33e21da8, penv=0x7f4b33e21db0, args=0x7f4b33e21d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
#13 0x00007f4b34bbc787 in JNI_CreateJavaVM (vm=0x7f4b33e21da8, penv=0x7f4b33e21db0, args=0x7f4b33e21d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
#14 0x00007f4b36333a6a in InitializeJVM (pvm=0x7f4b33e21da8, penv=0x7f4b33e21db0, ifn=0x7f4b33e21e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
#15 0x00007f4b363305ec in JavaMain (_args=0x7fff1f7dd300) at /var/jdk/src/java.base/share/native/libjli/java.c:411
#16 0x00007f4b363375ec in ThreadJavaMain (args=0x7fff1f7dd300) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
#17 0x00007f4b36192b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#18 0x00007f4b36223bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
+ +

输出结果

输出结果:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35

------------------------ OptoAssembly for Compile_id = 26 -----------------------
#
# void ( )
#
# -- Old rsp -- Framesize: 32 --
#r591 rsp+28: in_preserve
#r590 rsp+24: return address
#r589 rsp+20: in_preserve
#r588 rsp+16: saved fp register
#r587 rsp+12: pad2, stack alignment
#r586 rsp+ 8: pad2, stack alignment
#r585 rsp+ 4: Fixed slot 1
#r584 rsp+ 0: Fixed slot 0
#
000 N1: # out( B1 ) <- in( B1 ) Freq: 1

000 B1: # out( N1 ) <- BLOCK HEAD IS JUNK Freq: 1
000 # stack bang (96 bytes)
pushq rbp # Save rbp
subq rsp, #16 # Create frame

00c movq R10, java/lang/Class:exact * # ptr
016 movl R8, [R10 + #112 (8-bit)] # int ! Field: volatile com/Hello.i
01a MEMBAR-acquire ! (empty encoding)
01a MEMBAR-release ! (empty encoding)
01a incl R8 # int
01d movl [R10 + #112 (8-bit)], R8 # int ! Field: volatile com/Hello.i
021 lock addl [rsp + #0], 0 ! membar_volatile
027 addq rsp, 16 # Destroy frame
popq rbp
cmpq rsp, poll_offset[r15_thread]
ja #safepoint_stub # Safepoint: poll for GC

039 ret
Related function:

// the JDK function that prints the assembly above
PhaseOutput::dump_asm_on


Related reading

diff --git a/2022/11/21/xid-equal-to-close-xid/index.html b/2022/11/21/xid-equal-to-close-xid/index.html

xid equal to close_xid

Background

When writing to ClickHouse in production we hit the error xid equal to close_xid; it was reported only once and things recovered afterwards.

returned error: Code: 999, e.displayText() = DB::Exception: Cannot allocate block number in ZooKeeper: Coordination::Exception: xid equal to close_xid (Session expired) 
Investigation

Looking at the PR (added in 2019): when the xid reaches 0xffffffff the client throws an exception to avoid a potential deadlock. On the next request after the exception, ClickHouse reconnects to ZooKeeper and resets the xid, so the connection goes back to normal.

Related reading

diff --git a/2022/11/29/javac/index.html b/2022/11/29/javac/index.html

javac

Background

  • Understand the Java compilation process
  • Invoke javac through Java's module system

The java compile command

Starting with JDK 9, Java has a module system, and the libraries inside the JDK are split into modules.

The Java compiler front end is the javac command; what it actually invokes is the class com.sun.tools.javac.Main in the jdk.compiler module.

In other words, the javac command and java --module jdk.compiler/com.sun.tools.javac.Main are equivalent.
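
The same compiler can also be reached programmatically; a minimal sketch using the standard javax.tools API (this snippet is not part of the original post):

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public class CompileDemo {
    public static void main(String[] args) {
        // Returns the system Java compiler (the same front end behind javac);
        // null if only a JRE is present.
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        int result = compiler.run(null, null, null, "com/Hello.java");
        System.out.println(result == 0 ? "compiled" : "failed");
    }
}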

The java front end

List all modules:

$ java --list-modules
...
jdk.attach@17.0.5
jdk.charsets@17.0.5
jdk.compiler@17.0.5
...

My JDK is currently 17. You can see a jdk.compiler module in the list; that module is the Java compiler front end.

Invoking it as a module

First create a hello world:

vim com/Hello.java

Contents:

package com;
public class Hello{
public static void main(String [] argc){

System.out.print(argc.length);

}
}
Then compile it with:

java  --module   jdk.compiler/com.sun.tools.javac.Main  com/Hello.java
We now have the class file:

$tree com
com
├── Hello.class
└── Hello.java
Run the hello world demo:

$ java com.Hello
0
It runs normally.

Related reading

diff --git "a/2022/11/29/springboot-\350\257\267\346\261\202\346\265\201\347\250\213/index.html" "b/2022/11/29/springboot-\350\257\267\346\261\202\346\265\201\347\250\213/index.html"

springboot 请求流程

Background

Understand the life cycle of an HTTP request inside a Spring Boot application.

Tomcat startup

initialize:108, TomcatWebServer (org.springframework.boot.web.embedded.tomcat)
<init>:104, TomcatWebServer (org.springframework.boot.web.embedded.tomcat)
getTomcatWebServer:440, TomcatServletWebServerFactory (org.springframework.boot.web.embedded.tomcat)
getWebServer:193, TomcatServletWebServerFactory (org.springframework.boot.web.embedded.tomcat)
createWebServer:178, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
onRefresh:158, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
refresh:545, AbstractApplicationContext (org.springframework.context.support)
refresh:143, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
main:22, Application
Stack trace:

getLanguagesAllList:35, CommonController (com.patpat.mms.mdp.base.core.rest.controller)
invoke:-1, CommonController$$FastClassBySpringCGLIB$$2cf69542 (com.patpat.mms.mdp.base.core.rest.controller)
invoke:218, MethodProxy (org.springframework.cglib.proxy)
invokeJoinpoint:779, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
proceed:88, MethodInvocationProceedingJoinPoint (org.springframework.aop.aspectj)
doAround:27, AbstractLogAspect (com.patpat.marketing.common.aspect.log)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invokeAdviceMethodWithGivenArgs:644, AbstractAspectJAdvice (org.springframework.aop.aspectj)
invokeAdviceMethod:633, AbstractAspectJAdvice (org.springframework.aop.aspectj)
invoke:70, AspectJAroundAdvice (org.springframework.aop.aspectj)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
invoke:95, ExposeInvocationInterceptor (org.springframework.aop.interceptor)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
intercept:692, CglibAopProxy$DynamicAdvisedInterceptor (org.springframework.aop.framework)
getLanguagesAllList:-1, CommonController$$EnhancerBySpringCGLIB$$cea69971 (com.patpat.mms.mdp.base.core.rest.controller)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
doInvoke:190, InvocableHandlerMethod (org.springframework.web.method.support)
invokeForRequest:138, InvocableHandlerMethod (org.springframework.web.method.support)
invokeAndHandle:105, ServletInvocableHandlerMethod (org.springframework.web.servlet.mvc.method.annotation)
invokeHandlerMethod:878, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)
handleInternal:792, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)
handle:87, AbstractHandlerMethodAdapter (org.springframework.web.servlet.mvc.method)
doDispatch:1040, DispatcherServlet (org.springframework.web.servlet)
doService:943, DispatcherServlet (org.springframework.web.servlet)
processRequest:1006, FrameworkServlet (org.springframework.web.servlet)
doPost:909, FrameworkServlet (org.springframework.web.servlet)
service:652, HttpServlet (javax.servlet.http)
service:883, FrameworkServlet (org.springframework.web.servlet)
service:733, HttpServlet (javax.servlet.http)
internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
invoke:202, StandardWrapperValve (org.apache.catalina.core)
invoke:97, StandardContextValve (org.apache.catalina.core)
invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
invoke:143, StandardHostValve (org.apache.catalina.core)
invoke:92, ErrorReportValve (org.apache.catalina.valves)
invoke:78, StandardEngineValve (org.apache.catalina.core)
service:357, CoyoteAdapter (org.apache.catalina.connector)
service:374, Http11Processor (org.apache.coyote.http11)
process:65, AbstractProcessorLight (org.apache.coyote)
process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
run:49, SocketProcessorBase (org.apache.tomcat.util.net)
runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
run:834, Thread (java.lang)
Dispatcher servlet

Stack trace:

doService:911, DispatcherServlet (org.springframework.web.servlet)
processRequest:1006, FrameworkServlet (org.springframework.web.servlet)
doPost:909, FrameworkServlet (org.springframework.web.servlet)
service:652, HttpServlet (javax.servlet.http)
service:883, FrameworkServlet (org.springframework.web.servlet)
service:733, HttpServlet (javax.servlet.http)
internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
invoke:202, StandardWrapperValve (org.apache.catalina.core)
invoke:97, StandardContextValve (org.apache.catalina.core)
invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
invoke:143, StandardHostValve (org.apache.catalina.core)
invoke:92, ErrorReportValve (org.apache.catalina.valves)
invoke:78, StandardEngineValve (org.apache.catalina.core)
service:357, CoyoteAdapter (org.apache.catalina.connector)
service:374, Http11Processor (org.apache.coyote.http11)
process:65, AbstractProcessorLight (org.apache.coyote)
process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
run:49, SocketProcessorBase (org.apache.tomcat.util.net)
runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
run:834, Thread (java.lang)

Related reading

diff --git a/2022/11/30/java-assert/index.html b/2022/11/30/java-assert/index.html

java assert
+ + +

Background

In C, assert comes from glibc (a macro in assert.h).
Java has a similar feature; assertions let you catch problems quickly in a test environment.
In Java, assert is a Statement, i.e. a language-level statement.
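A minimal sketch of the statement form (run with java -ea AssertDemo; assertions are disabled by default, so without -ea the check is skipped entirely):

public class AssertDemo {
    static int divide(int a, int b) {
        // Evaluated only when assertions are enabled; no cost otherwise.
        assert b != 0 : "divisor must not be zero";
        return a / b;
    }

    public static void main(String[] args) {
        System.out.println(divide(10, 2));
        System.out.println(divide(10, 0)); // AssertionError with -ea, ArithmeticException without
    }
}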

diff --git "a/2022/11/30/java-使用lua-script/index.html" (new file)

java: using lua scripts

diff --git "a/2022/11/30/tomcat-编译/index.html" (new file)

Building Tomcat

Background

Goal: understand the Tomcat lifecycle and the lifecycle of an HTTP request through it.

Steps

• 1 Clone the Tomcat source:

git clone https://github.com/apache/tomcat

The install directory used after the build is configured in ${tomcat.source}/build.properties.

• 2 Install ant

I'm on Ubuntu, so I install it via the package manager:
sudo apt install ant

• 3 Copy build.properties into place
  It holds the build properties; paths for downloaded dependencies are configured here.
    cp build.properties.default  build.properties



We want debug symbols generated at compile time. For a plain Java program that means javac -g; here the flag must be passed to javac by the build. In Tomcat's build.xml it is easy to spot the compile.debug property:
<javac srcdir="java" destdir="${tomcat.classes}"
debug="${compile.debug}"          <!-- the compile.debug property is used here -->
deprecation="${compile.deprecation}"
release="${compile.release}"
encoding="ISO-8859-1"
includeAntRuntime="true" >
<!-- Uncomment this to show unchecked warnings:
<compilerarg value="-Xlint:unchecked"/>
So where is this property set?
Open build.properties: the default Tomcat checkout already builds with -g, so nothing needs changing.
# ----- Build control flags -----
compile.debug=true
• 4 Run the build with ant:
ant
The build output lands in the directory below, where source_code is your checkout:
{source_code}/output/build/bin
Change into {source_code}/output/build/bin and start Tomcat:
./startup.sh 
Using CATALINA_BASE: /home/dai/tomcat/output/build
Using CATALINA_HOME: /home/dai/tomcat/output/build
Using CATALINA_TMPDIR: /home/dai/tomcat/output/build/temp
Using JRE_HOME: /usr
Using CLASSPATH: /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar
Using CATALINA_OPTS:
Tomcat started.

To see the full Tomcat command line:
ps aux | grep tomcat


/usr/bin/java -Djava.util.logging.config.file=/home/dai/tomcat/output/build/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Djdk.tls.ephemeralDHKeySize=2048 -Djava.protocol.handler.pkgs=org.apache.catalina.webresources -Dorg.apache.catalina.security.SecurityListener.UMASK=0027 --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED -classpath /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar -Dcatalina.base=/home/dai/tomcat/output/build -Dcatalina.home=/home/dai/tomcat/output/build -Djava.io.tmpdir=/home/dai/tomcat/output/build/temp org.apache.catalina.startup.Bootstrap start


Hitting Tomcat

Open http://127.0.0.1:8080/ in a browser.

Debugging Tomcat

Breaking at the main function

Take the command line captured above with ps aux | grep "tomcat", and insert -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000 right after /usr/bin/java, so the JVM waits for a debugger before reaching main:

Parameter notes:

• suspend: whether the JVM stays suspended until a debugger attaches
• address: listen address; a bare port is enough, here 8000
• transport: the JDWP transport; locally a socket is usual (shared memory also exists), here dt_socket
/usr/bin/java -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000   -Djava.util.logging.config.file=/home/dai/tomcat/output/build/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Djdk.tls.ephemeralDHKeySize=2048 -Djava.protocol.handler.pkgs=org.apache.catalina.webresources -Dorg.apache.catalina.security.SecurityListener.UMASK=0027 --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED -classpath /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar -Dcatalina.base=/home/dai/tomcat/output/build -Dcatalina.home=/home/dai/tomcat/output/build -Djava.io.tmpdir=/home/dai/tomcat/output/build/temp org.apache.catalina.startup.Bootstrap start

Attach with jdb:
jdb  -attach  8000 -sourcepath /home/dai/tomcat/java/

### Set a breakpoint at org.apache.catalina.startup.Bootstrap.main
main[1] stop in org.apache.catalina.startup.Bootstrap.main
Deferring breakpoint org.apache.catalina.startup.Bootstrap.main.
It will be set after the class is loaded.
### Start running with run
main[1] run

Use list to show the source; execution is now stopped in main:
Breakpoint hit: "thread=main", org.apache.catalina.startup.Bootstrap.main(), line=442 bci=0
442 synchronized (daemonLock) {

main[1] list
438 * @param args Command line arguments to be processed
439 */
440 public static void main(String args[]) {
441
442 => synchronized (daemonLock) {
443 if (daemon == null) {
444 // Don't set daemon until init() has completed
445 Bootstrap bootstrap = new Bootstrap();
446 try {
447 bootstrap.init();
main[1]
Servlet request path

The stack when a request reaches the example servlet:
http-nio-8080-exec-1[1] where
[1] HelloWorldExample.doGet (HelloWorldExample.java:41)
[2] jakarta.servlet.http.HttpServlet.service (HttpServlet.java:705)
[3] jakarta.servlet.http.HttpServlet.service (HttpServlet.java:814)
[4] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:223)
[5] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
[6] org.apache.tomcat.websocket.server.WsFilter.doFilter (WsFilter.java:53)
[7] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:185)
[8] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
[9] org.apache.catalina.filters.HttpHeaderSecurityFilter.doFilter (HttpHeaderSecurityFilter.java:126)
[10] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:185)
[11] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
[12] org.apache.catalina.core.StandardWrapperValve.invoke (StandardWrapperValve.java:177)
[13] org.apache.catalina.core.StandardContextValve.invoke (StandardContextValve.java:97)
[14] org.apache.catalina.authenticator.AuthenticatorBase.invoke (AuthenticatorBase.java:542)
[15] org.apache.catalina.core.StandardHostValve.invoke (StandardHostValve.java:119)
[16] org.apache.catalina.valves.ErrorReportValve.invoke (ErrorReportValve.java:92)
[17] org.apache.catalina.valves.AbstractAccessLogValve.invoke (AbstractAccessLogValve.java:690)
[18] org.apache.catalina.core.StandardEngineValve.invoke (StandardEngineValve.java:78)
[19] org.apache.catalina.connector.CoyoteAdapter.service (CoyoteAdapter.java:357)
[20] org.apache.coyote.http11.Http11Processor.service (Http11Processor.java:400)
[21] org.apache.coyote.AbstractProcessorLight.process (AbstractProcessorLight.java:65)
[22] org.apache.coyote.AbstractProtocol$ConnectionHandler.process (AbstractProtocol.java:859)
[23] org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun (NioEndpoint.java:1,734)
[24] org.apache.tomcat.util.net.SocketProcessorBase.run (SocketProcessorBase.java:52)
[25] org.apache.tomcat.util.threads.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,191)
[26] org.apache.tomcat.util.threads.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:659)
[27] org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run (TaskThread.java:61)
[28] java.lang.Thread.run (Thread.java:833)
Related reading

diff --git "a/2022/12/01/java-方法签名/index.html" (new file)

java method signatures

Background

Get familiar with how Java method signatures (descriptors) work.

Inspecting signatures with javap
javap -s java.lang.String
Compiled from "String.java"
public final class java.lang.String implements java.io.Serializable, java.lang.Comparable<java.lang.String>, java.lang.CharSequence {
static final boolean COMPACT_STRINGS;
descriptor: Z
public static final java.util.Comparator<java.lang.String> CASE_INSENSITIVE_ORDER;
descriptor: Ljava/util/Comparator;
static final byte LATIN1;
descriptor: B
static final byte UTF16;
descriptor: B
public java.lang.String();
descriptor: ()V



public java.lang.String(byte[], int, int, int);
descriptor: ([BIII)V

public java.lang.String(byte[], int);
descriptor: ([BI)V

public java.lang.String(byte[], int, int, java.lang.String) throws java.io.UnsupportedEncodingException;
descriptor: ([BIILjava/lang/String;)V


}
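To tie the descriptor syntax back to source, here is a small made-up class and the descriptors javap -s would print for it (B = byte, I = int, [ = array of, Lpkg/Name; = object type, V = void):

public class SignatureDemo {
    // descriptor: ([BII)V  -- byte[] plus two ints, returns void
    void write(byte[] buf, int off, int len) { }

    // descriptor: (Ljava/lang/String;)I  -- one String parameter, returns int
    int length(String s) { return s.length(); }
}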
Related reading

diff --git "a/2022/12/04/micro-k8s-使用/index.html" (new file)

Using MicroK8s

Background

Hit this error: "This is not a valid name for a Kubernetes node, causing node registration to fail."

Troubleshooting

Inspect the error:
$ microk8s inspect
WARNING:  This machine's hostname contains capital letters and/or underscores. 
This is not a valid name for a Kubernetes node, causing node registration to fail.
Please change the machine's hostname or refer to the documentation for more details:
The cause: the hostname is not valid for Kubernetes.

How do you check your hostname?

Use the hostname or hostnamectl command:
$ hostname
dai-MS-7B89
$ hostnamectl
Static hostname: dai-MS-7B89
Icon name: computer-desktop
Chassis: desktop
Machine ID: d55c62a250474c459bda9aecc21307a7
Boot ID: e97354108f364004a0775ce12cc57d98
Operating System: Ubuntu 22.04 LTS
Kernel: Linux 5.15.0-56-generic
Architecture: x86-64
Hardware Vendor: Micro-Star International Co., Ltd.
Hardware Model: MS-7B89
My current hostname contains capital letters and a dash.

Change it to myhost:
sudo hostnamectl set-hostname  myhost
Related reading

diff --git a/2022/12/06/java-volalite/index.html (new file)

java volatile

Background

Understand Java's volatile.

volatile

The JLS describes volatile in two roles:

• as a keyword
• as a field modifier

What volatile is for

The detailed description is in JLS 8.3.1.4, "volatile Fields":
The Java programming language allows threads to access shared variables (§17.1).
As a rule, to ensure that shared variables are consistently and reliably updated, a
thread should ensure that it has exclusive use of such variables by obtaining a lock
that, conventionally, enforces mutual exclusion for those shared variables.
The Java programming language provides a second mechanism, volatile fields,
that is more convenient than locking for some purposes
Paraphrasing:

In Java, a variable can be shared between threads.
Normally a thread must hold a lock to ensure shared variables are updated reliably and consistently,
i.e. it relies on mutual exclusion.
Besides locks, Java provides a second mechanism for reading and updating shared variables: volatile fields.
In some situations a volatile field is more convenient than locking.
visibility, ordering and atomicity

Synchronization involves three concerns:

• visibility
• ordering
• atomicity

JSR-133 names three kinds of problems that can appear in incorrectly synchronized programs; the original wording:
If a program is not correctly synchronized, then three types of problems can appear:
visibility, ordering and atomicity.
Ordering

Below is the example from JSR-133.

There are two threads, threadOne and threadTwo:
class BadlyOrdered {
boolean a = false;
boolean b = false;

void threadOne() {
a = true;
b = true;
}

boolean threadTwo() {
boolean r1 = b; // sees true
boolean r2 = a; // sees false
return r1 && !r2; // returns true
}
}
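For contrast, a minimal sketch of the same class with both fields declared volatile. A volatile write happens-before a later volatile read of that field, so once threadTwo reads b as true the earlier write to a must also be visible, and the method can no longer return true:

class WellOrdered {
    volatile boolean a = false;
    volatile boolean b = false;

    void threadOne() {
        a = true;   // volatile write
        b = true;   // volatile write, ordered after the write to a
    }

    boolean threadTwo() {
        boolean r1 = b; // volatile read; if true, the write to a is already visible
        boolean r2 = a;
        return r1 && !r2; // now always false
    }
}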
I found a C++ demo of CPU instruction reordering online (link in the original post); save it as reorder.cpp:
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

// Set either of these to 1 to prevent CPU reordering
#define USE_CPU_FENCE 0
#define USE_SINGLE_HW_THREAD 0 // Supported on Linux, but not Cygwin or PS3

#if USE_SINGLE_HW_THREAD
#include <sched.h>
#endif


//-------------------------------------
// MersenneTwister
// A thread-safe random number generator with good randomness
// in a small number of instructions. We'll use it to introduce
// random timing delays.
//-------------------------------------
#define MT_IA 397
#define MT_LEN 624

class MersenneTwister
{
unsigned int m_buffer[MT_LEN];
int m_index;

public:
MersenneTwister(unsigned int seed);
// Declare noinline so that the function call acts as a compiler barrier:
unsigned int integer() __attribute__((noinline));
};

MersenneTwister::MersenneTwister(unsigned int seed)
{
// Initialize by filling with the seed, then iterating
// the algorithm a bunch of times to shuffle things up.
for (int i = 0; i < MT_LEN; i++)
m_buffer[i] = seed;
m_index = 0;
for (int i = 0; i < MT_LEN * 100; i++)
integer();
}

unsigned int MersenneTwister::integer()
{
// Indices
int i = m_index;
int i2 = m_index + 1; if (i2 >= MT_LEN) i2 = 0; // wrap-around
int j = m_index + MT_IA; if (j >= MT_LEN) j -= MT_LEN; // wrap-around

// Twist
unsigned int s = (m_buffer[i] & 0x80000000) | (m_buffer[i2] & 0x7fffffff);
unsigned int r = m_buffer[j] ^ (s >> 1) ^ ((s & 1) * 0x9908B0DF);
m_buffer[m_index] = r;
m_index = i2;

// Swizzle
r ^= (r >> 11);
r ^= (r << 7) & 0x9d2c5680UL;
r ^= (r << 15) & 0xefc60000UL;
r ^= (r >> 18);
return r;
}


//-------------------------------------
// Main program, as decribed in the post
//-------------------------------------
sem_t beginSema1;
sem_t beginSema2;
sem_t endSema;

int X, Y;
int r1, r2;

void *thread1Func(void *param)
{
MersenneTwister random(1);
for (;;)
{
sem_wait(&beginSema1); // Wait for signal
while (random.integer() % 8 != 0) {} // Random delay

// ----- THE TRANSACTION! -----
X = 1;
#if USE_CPU_FENCE
asm volatile("mfence" ::: "memory"); // Prevent CPU reordering
#else
asm volatile("" ::: "memory"); // Prevent compiler reordering
#endif
r1 = Y;

sem_post(&endSema); // Notify transaction complete
}
return NULL; // Never returns
};

void *thread2Func(void *param)
{
MersenneTwister random(2);
for (;;)
{
sem_wait(&beginSema2); // Wait for signal
while (random.integer() % 8 != 0) {} // Random delay

// ----- THE TRANSACTION! -----
Y = 1;
#if USE_CPU_FENCE
asm volatile("mfence" ::: "memory"); // Prevent CPU reordering
#else
asm volatile("" ::: "memory"); // Prevent compiler reordering
#endif
r2 = X;

sem_post(&endSema); // Notify transaction complete
}
return NULL; // Never returns
};

int main()
{
// Initialize the semaphores
sem_init(&beginSema1, 0, 0);
sem_init(&beginSema2, 0, 0);
sem_init(&endSema, 0, 0);

// Spawn the threads
pthread_t thread1, thread2;
pthread_create(&thread1, NULL, thread1Func, NULL);
pthread_create(&thread2, NULL, thread2Func, NULL);

#if USE_SINGLE_HW_THREAD
// Force thread affinities to the same cpu core.
cpu_set_t cpus;
CPU_ZERO(&cpus);
CPU_SET(0, &cpus);
pthread_setaffinity_np(thread1, sizeof(cpu_set_t), &cpus);
pthread_setaffinity_np(thread2, sizeof(cpu_set_t), &cpus);
#endif

// Repeat the experiment ad infinitum
int detected = 0;
for (int iterations = 1; ; iterations++)
{
// Reset X and Y
X = 0;
Y = 0;
// Signal both threads
sem_post(&beginSema1);
sem_post(&beginSema2);
// Wait for both threads
sem_wait(&endSema);
sem_wait(&endSema);
// Check if there was a simultaneous reorder
if (r1 == 0 && r2 == 0)
{
detected++;
printf("%d reorders detected after %d iterations\n", detected, iterations);
}
}
return 0; // Never returns
}

## compile
gcc -O2 reorder.cpp -o reorder

## run
./reorder
Related reading

diff --git a/2022/12/07/java-redis-client/index.html (new file)

java redis client

Background

I needed to understand how Redis is used from Java.

Redisson

Redisson is a Redis client for Java.

A problem when wiring it into Spring Boot

Spring Boot failed at startup with "MOVED redirection loop detected": the test environment runs Redis in cluster mode, but my local configuration pointed at a single node:
org.springframework.dao.InvalidDataAccessApiUsageException: MOVED redirection loop detected. Node redis://10.2.26.106:6379 has further redirect to redis://xxx:6379; nested exception is org.redisson.client.RedisException: MOVED redirection loop detected. Node redis://10.2.26.106:6379 has further redirect to redis://xxx:6379
Fix: switch the configuration to cluster mode; get and set then work normally.
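A minimal sketch of the cluster-mode setup with Redisson's programmatic API (the node address is a placeholder; in a Spring Boot app the same choice is normally made in the Redis configuration instead):

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public class RedissonClusterDemo {
    public static void main(String[] args) {
        Config config = new Config();
        // Cluster mode instead of useSingleServer(); the address below is a placeholder.
        config.useClusterServers()
              .addNodeAddress("redis://127.0.0.1:7000");

        RedissonClient client = Redisson.create(config);
        client.getBucket("greeting").set("hello");
        System.out.println(client.getBucket("greeting").get());
        client.shutdown();
    }
}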

Related reading

diff --git a/2022/12/07/spring-boot/index.html (new file)

spring boot

Background

Goals:

• set up Spring Boot
• use Spring Boot
• package a Spring Boot app

Getting started

Generate a Spring Boot demo project from:
https://start.spring.io/
(screenshot: spring boot download page)

A working example URL:
https://start.spring.io/#!type=maven-project&language=java&platformVersion=3.0.0&packaging=jar&jvmVersion=17&groupId=com.example&artifactId=demo&name=demo&description=Demo%20project%20for%20Spring%20Boot&packageName=com.example.demo
Unzip

The download is named demo.zip; unzip it:
unzip  demo.zip 
Install Maven

Maven is Java's package/build management tool.

On Ubuntu, install it with:
sudo apt install maven
Add Tomcat

Add the web starter (which pulls in embedded Tomcat) to pom.xml:
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
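To have something to serve on port 8080 you can drop in a controller like this sketch (class name and path are made up for illustration):

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class HelloController {
    // GET http://127.0.0.1:8080/hello
    @GetMapping("/hello")
    public String hello() {
        return "hello from spring boot";
    }
}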
Build a fat jar

The spring-boot:repackage goal produces the runnable fat jar; it runs as part of the package phase, so:
mvn package
Run the jar

Run it with java -jar ./target/demo-0.0.1-SNAPSHOT.jar:
$ java -jar ./target/demo-0.0.1-SNAPSHOT.jar 

. ____ _ __ _ _
/\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
\\/ ___)| |_)| | | | | || (_| | ) ) ) )
' |____| .__|_| |_|_| |_\__, | / / / /
=========|_|==============|___/=/_/_/_/
:: Spring Boot :: (v3.0.0)

2022-12-09T00:43:52.343+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : Starting DemoApplication v0.0.1-SNAPSHOT using Java 17.0.5 with PID 1459280 (/home/dai/spring/demo/target/demo-0.0.1-SNAPSHOT.jar started by dai in /home/dai/spring/demo)
2022-12-09T00:43:52.346+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : No active profile set, falling back to 1 default profile: "default"
2022-12-09T00:43:53.153+08:00 INFO 1459280 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http)
2022-12-09T00:43:53.162+08:00 INFO 1459280 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat]
2022-12-09T00:43:53.163+08:00 INFO 1459280 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.1]
2022-12-09T00:43:53.232+08:00 INFO 1459280 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
2022-12-09T00:43:53.234+08:00 INFO 1459280 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 835 ms
2022-12-09T00:43:53.537+08:00 INFO 1459280 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path ''
2022-12-09T00:43:53.549+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : Started DemoApplication in 1.522 seconds (process running for 1.859)

Related reading

diff --git a/2022/12/10/create-a-maven-plugin/index.html (new file)

create a maven plugin

Background

How to create a Maven plugin.

Steps

Use the Maven plugin archetype to generate a plugin called hello-maven-plugin:
mvn archetype:generate \
-DgroupId=sample.plugin \
-DartifactId=hello-maven-plugin \
-DarchetypeGroupId=org.apache.maven.archetypes \
-DarchetypeArtifactId=maven-archetype-plugin
The generated directory tree:
$ tree  .
.
└── hello-maven-plugin
├── pom.xml
└── src
├── it
│ ├── settings.xml
│ └── simple-it
│ ├── pom.xml
│ └── verify.groovy
└── main
└── java
└── sample
└── plugin
└── MyMojo.java
+ +

可以看到创建了一个hello-maven-plugin 目录, 其中pom.xml文件

+

这是核心的pom内容:

+
1
2
3
4
5
6
<groupId>sample.plugin</groupId>
<artifactId>hello-maven-plugin</artifactId>
<version>1.0-SNAPSHOT</version>
<packaging>maven-plugin</packaging>

<name>hello-maven-plugin Maven Plugin</name>
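The archetype also generates a Mojo (MyMojo.java). A minimal goal looks roughly like this sketch (the goal name and message are illustrative):

package sample.plugin;

import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugins.annotations.Mojo;

// Once installed, runs via: mvn sample.plugin:hello-maven-plugin:sayhi
@Mojo(name = "sayhi")
public class GreetingMojo extends AbstractMojo {
    @Override
    public void execute() {
        getLog().info("Hello, world.");
    }
}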
Related reading

diff --git "a/2022/12/12/mockito-使用/index.html" (new file)

Using Mockito

Background

Our new project uses Mockito to mock data, so I need to learn how to use it.

Usage

How do you use it?

The official site https://site.mockito.org/ shows how to get started.
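A minimal sketch of the stub-and-verify flow (the mocked List mirrors the example on the Mockito site):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.List;

public class MockitoDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        List<String> mocked = mock(List.class);

        // Stub a return value.
        when(mocked.get(0)).thenReturn("first");
        System.out.println(mocked.get(0)); // prints "first"

        // Verify the interaction happened.
        verify(mocked).get(0);
    }
}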

diff --git a/2022/12/12/mybatis-dollor-and-sharp/index.html (new file)

mybatis dollar ($) and sharp (#)

diff --git "a/2022/12/15/java-int-overflow-探究/index.html" (new file)

Exploring java int overflow

Background

Ran into a Java int overflow and wanted to understand how Java handles integer overflow:
jshell> 2147483647 + 1
$3 ==> -2147483648
Checking the rule in the JLS

What does the spec say happens when + overflows?

The result still has a sign bit and value bits, determined as follows:

• sign bit: the opposite of the sign of the true mathematical sum
• value bits: the low-order bits of the two's-complement representation of the true sum

The JLS wording:
If an integer addition overflows, then the result is the low-order bits of the
mathematical sum as represented in some sufficiently large two's-complement
format. If overflow occurs, then the sign of the result is not the same as the sign of
the mathematical sum of the two operand values.
Working through the example
2147483647 + 1 
Here 2147483647 and 1 are both int literals, and the + overflows.

Decimal 2147483647 is 0x7fffffff in hex.

In a sufficiently wide two's-complement format the true sum 2147483647 + 1 is 0x80000000, i.e. binary ...0 1000 0000 0000 0000 0000 0000 0000 0000.

So:
the low-order 32 bits are a 1 followed by 31 zeros;
the sign bit is 1, the opposite of the mathematical sum's sign;
and that bit pattern, read as an int, is -2147483648.
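If you would rather get an error than silent wrap-around, Math.addExact throws on overflow; a quick sketch:

public class OverflowDemo {
    public static void main(String[] args) {
        int wrapped = Integer.MAX_VALUE + 1;   // wraps around silently
        System.out.println(wrapped);           // -2147483648

        try {
            int exact = Math.addExact(Integer.MAX_VALUE, 1);
            System.out.println(exact);
        } catch (ArithmeticException e) {
            System.out.println("overflow detected: " + e.getMessage());
        }
    }
}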

Related reading

diff --git "a/2022/12/15/spring-boot-基础/index.html" (new file)

spring boot basics

Background

A collection of common Spring Boot questions.

Annotations

Which takes precedence for injection, @Bean or @Component?

It seems to depend on the implementation, and possibly on the Spring version.

(Related link: which of @Bean and @Component wins for injection.)

Using @Component

@Component is placed on a class, @Bean on a method inside a configuration class; @Bean is the more flexible of the two.

Component annotation: https://www.baeldung.com/spring-component-annotation
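A minimal sketch of the two styles side by side (class names are made up):

import java.time.Clock;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Component;

// @Component: a class you own, picked up by component scanning.
@Component
class GreetingService {
    String greet() { return "hi"; }
}

// @Bean: handy for types you don't own and can't annotate; construction happens in the method body.
@Configuration
class AppConfig {
    @Bean
    Clock systemClock() {
        return Clock.systemUTC();
    }
}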

diff --git a/2022/12/16/ConcurrentHashMap-npe/index.html (new file)

ConcurrentHashMap NPE

Background

Hit a NullPointerException from ConcurrentHashMap in production: unlike HashMap, ConcurrentHashMap does not accept null keys or values, so get/put with null throws.
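A quick sketch of the behaviour (HashMap tolerates nulls, ConcurrentHashMap throws):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ChmNullDemo {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("k", null);                  // fine
        System.out.println(hashMap.get("k"));    // null

        Map<String, String> chm = new ConcurrentHashMap<>();
        try {
            chm.put("k", null);                  // throws NullPointerException
        } catch (NullPointerException e) {
            System.out.println("ConcurrentHashMap rejects null values");
        }
        try {
            chm.get(null);                       // also throws NullPointerException
        } catch (NullPointerException e) {
            System.out.println("ConcurrentHashMap rejects null keys");
        }
    }
}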

diff --git a/2022/12/16/java-thread-pool/index.html (new file)

java thread pool

Background

Understand Java thread pools.

Thread pools

On Linux a Java thread is ultimately created with pthread_create; a thread pool is a management layer wrapped around those threads.
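A minimal sketch of building a pool directly with ThreadPoolExecutor instead of the Executors factory methods:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolDemo {
    public static void main(String[] args) throws InterruptedException {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2,                        // core pool size
                4,                        // maximum pool size
                60, TimeUnit.SECONDS,     // idle timeout for non-core threads
                new LinkedBlockingQueue<>(100)); // bounded work queue

        for (int i = 0; i < 10; i++) {
            final int task = i;
            pool.execute(() -> System.out.println(
                    Thread.currentThread().getName() + " running task " + task));
        }

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}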

Related reading

diff --git "a/2022/12/16/spring-boot-repackage-和入口/index.html" (new file)

spring boot repackage and the entry point

Background

Understand how a Spring Boot jar is packaged and where its entry point is.

Example

The manifest of the repackaged Spring Boot jar from my work project looks like this (Main-Class is Spring Boot's JarLauncher, which sets up the nested-jar classpath and then invokes the application class named in Start-Class):
Manifest-Version: 1.0
Created-By: Maven Jar Plugin 3.2.0
Build-Jdk-Spec: 11
Implementation-Title: mdp-biz-engine-rest
Implementation-Version: 3.0.0-SNAPSHOT
Main-Class: org.springframework.boot.loader.JarLauncher
Start-Class: com.xxx.Application
Spring-Boot-Version: 2.3.12.RELEASE
Spring-Boot-Classes: BOOT-INF/classes/
Spring-Boot-Lib: BOOT-INF/lib/
Spring-Boot-Classpath-Index: BOOT-INF/classpath.idx
Related reading

diff --git "a/2022/12/16/编译sping-boot/index.html" (new file)

Building Spring Boot from source

Background

Build Spring Boot from source.

Process

The GitHub README explains how to build:

## clone the source
git clone https://github.com/spring-projects/spring-boot.git
## change into the directory
cd spring-boot
## build
./gradlew

If downloading packages hosted abroad is slow, you can configure a proxy in the build script:
vim build.gradle
Where do the built jars end up?
In each submodule's build/libs directory:
$ tree spring-boot-project/spring-boot/build/libs/
spring-boot-project/spring-boot/build/libs/
├── spring-boot-3.0.1-SNAPSHOT.jar
└── spring-boot-3.0.1-SNAPSHOT-sources.jar

Starting Spring Boot

The Maven run goal is implemented in:
spring-boot-project/spring-boot-tools/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/AbstractRunMojo.java

@Override
public void execute() throws MojoExecutionException, MojoFailureException {
if (this.skip) {
getLog().debug("skipping run as per configuration.");
return;
}
String startClass = (this.mainClass != null) ? this.mainClass
: SpringBootApplicationClassFinder.findSingleClass(this.classesDirectory); // locate the main class
run(startClass); // launch it
}
Related reading

diff --git "a/2022/12/19/gradle-使用/index.html" (new file)

Using Gradle

Background

Spring Boot is built with Gradle, so I need to learn how to use it.

Download and install

Downloads are available from the Gradle site.

Configure environment variables

• Windows

The download is a zip:

(screenshot: gradle zip)

After unpacking:

(screenshot: gradle directory)

(screenshot: gradle environment variable)

Verify the installation:
$ gradle -version

Welcome to Gradle 7.6!

Here are the highlights of this release:
- Added support for Java 19.
- Introduced `--rerun` flag for individual task rerun.
- Improved dependency block for test suites to be strongly typed.
- Added a pluggable system for Java toolchains provisioning.

For more details see https://docs.gradle.org/7.6/release-notes.html


------------------------------------------------------------
Gradle 7.6
------------------------------------------------------------

Build time: 2022-11-25 13:35:10 UTC
Revision: daece9dbc5b79370cc8e4fd6fe4b2cd400e150a8

Kotlin: 1.7.10
Groovy: 3.0.13
Ant: Apache Ant(TM) version 1.10.11 compiled on July 10 2021
JVM: 11 (Oracle Corporation 11+28)
OS: Windows 10 10.0 amd64
A first Gradle project

(Following the official getting-started guide.)
$ mkdir demo
$ cd demo
$ gradle init
Starting a Gradle Daemon (subsequent builds will be faster)
<-------------> 0% INITIALIZING [2s]77ms] <-------------> 0% INITIALIZING [783ms]<

Select type of project to generate:
1: basic
2: application
3: library
4: Gradle plugin
Enter selection (default: basic) [1..4] 2

Select implementation language:
1: C++ 2: Groovy [11s]]7s]
3: Java
4: Kotlin
5: Scala
6: Swift
Enter selection (default: Java) [1..6] 3

Split functionality across multiple subprojects?:
1: no - only one application project 2: yes - application and library projects
Enter selection (default: no - only one application project) [1..2] 1

Select build script DSL:
1: Groovy 2: Kotlin
Enter selection (default: Groovy) [1..2] 1

Generate build using new APIs and b
Select test framework:
1: JUnit 4 2: TestNG
3: Spock
4: JUnit Jupiter
Enter selection (default: JUnit Jupiter) [1..4] 1

Project name (default: demo):
Source package (default: demo):

> Task :init EGet more help with your project: https://docs.gradle.org/7.6/samples/sample_building_java_applications.html

BUILD SUCCESSFUL in 1m 13s
2 actionable tasks: 2 executed
Directory layout
├─.gradle
├─app
│ └─src
│ ├─main
│ │ ├─java
│ │ │ └─demo
│ │ └─resources
│ └─test
│ ├─java
│ │ └─demo
│ └─resources
└─gradle
└─wrapper
diff --git a/2022/12/19/memory-model/index.html (new file)

memory model

Background

Understand the memory model.

• Preventing reordering at compile time. GCC's description of the "memory" clobber:
"memory"
The "memory" clobber tells the compiler that the assembly code performs memory reads or writes to items other than those listed in the input and output operands (for example, accessing the memory pointed to by one of the input parameters). To ensure memory contains correct values, GCC may need to flush specific register values to memory before executing the asm. Further, the compiler does not assume that any values read from memory before an asm remain unchanged after that asm; it reloads them as needed. Using the "memory" clobber effectively forms a read/write memory barrier for the compiler.

Note that this clobber does not prevent the processor from doing speculative reads past the asm statement. To prevent that, you need processor-specific fence instructions.
False sharing

Demo:
#include <thread>

alignas(128) volatile int counter[1024]{};

void update(int idx) {
for (int j = 0; j < 100000000; j++) ++counter[idx];
}

static const int stride = SIZE/sizeof(counter[0]);
int main() {
std::thread t1(update, 0*stride);
std::thread t2(update, 1*stride);
std::thread t3(update, 2*stride);
std::thread t4(update, 3*stride);
t1.join();
t2.join();
t3.join();
t4.join();
}
Compile and measure:
g++ -DSIZE=64 -pthread -O2 cacheline.c  && perf stat -etask-clock,context-switches,cpu-migrations,cycles -r20 ./a.out
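A rough Java analog of the same effect, as a sketch: two threads hammer volatile counters that are likely to sit on the same cache line (field layout is JVM-dependent, so treat this as illustrative; the JDK's own mitigation is the internal @Contended annotation used by classes such as Thread and ConcurrentHashMap):

public class FalseSharingDemo {
    // Two volatile counters declared next to each other; they will often share a cache line.
    static volatile long a = 0;
    static volatile long b = 0;

    static final long ITERATIONS = 100_000_000L;

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = new Thread(() -> { for (long i = 0; i < ITERATIONS; i++) a++; });
        Thread t2 = new Thread(() -> { for (long i = 0; i < ITERATIONS; i++) b++; });
        long start = System.nanoTime();
        t1.start(); t2.start();
        t1.join(); t2.join();
        // Compare against a run where unused long fields are inserted between a and b
        // to push them onto different cache lines.
        System.out.printf("took %d ms%n", (System.nanoTime() - start) / 1_000_000);
    }
}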

Related reading

diff --git a/2022/12/20/java-parser/index.html (new file)

java parser

Background

• understand how .java files are lexed and parsed
• understand the javac compilation process

parser

Type checking lives in:
src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Check.java
The lexer/parser that builds the parse tree:
src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java
A gdb backtrace caught while javac writes out a compilation error:
(gdb) p (char *) buf
$3 = 0x7ffff5980170 "/home/dai/javademo/learn_java/generic/GenericMethod.java:8: error: name clash: sayHi(List<Integer>) and sayHi(List<String>) have the same erasure\n"
(gdb) bt
#0 __GI___libc_write (fd=fd@entry=2, buf=buf@entry=0x7ffff5980170, nbytes=nbytes@entry=146) at ../sysdeps/unix/sysv/linux/write.c:25
#1 0x00007ffff5851e28 in handleWrite (fd=2, buf=buf@entry=0x7ffff5980170, len=len@entry=146) at /home/dai/jdk/src/java.base/unix/native/libjava/io_util_md.c:196
#2 0x00007ffff58518ba in writeBytes (env=0x7ffff00295d0, this=0x7ffff5982290, bytes=<optimized out>, off=0, len=146, append=<optimized out>, fid=0xd08e043)
at /home/dai/jdk/src/java.base/share/native/libjava/io_util.c:189
#3 0x00007ffff584a2ab in Java_java_io_FileOutputStream_writeBytes (env=<optimized out>, this=<optimized out>, bytes=<optimized out>, off=<optimized out>, len=<optimized out>, append=<optimized out>)
at /home/dai/jdk/src/java.base/share/native/libjava/FileOutputStream.c:70
#4 0x00007fffe100f6cb in ?? ()
#5 0x00007ffff7d41000 in ?? ()
#6 0x0000555555581520 in ?? ()
#7 0x00007ffff00292f0 in ?? ()
#8 0x00007fffb4d44e58 in ?? ()
#9 0x00007fffe100f199 in ?? ()
#10 0x00007ffff5982208 in ?? ()
#11 0x00007fffb41f2960 in ?? ()
#12 0x00007ffff5982290 in ?? ()
#13 0x00007fffb41f3b38 in ?? ()
#14 0x0000000000000000 in ?? ()
+ + +

Related reading

diff --git a/2022/12/21/how-to-debug-javac/index.html (new file)

how to debug javac

Background

javac is the Java compiler. I'm currently very interested in compilers, and in type erasure and Java's type system in particular, so I need a way to debug javac.

How to do it

Java 9 introduced the module system. Before that, javac shipped separately in tools.jar; since Java 9 the compiler code lives in the jdk.compiler module.
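One low-friction way to step through it is to invoke the compiler in-process via the javax.tools API and put breakpoints on com.sun.tools.javac.* classes (JavacParser, Check, ...) in an IDE; a minimal sketch, with a placeholder source path:

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public class RunJavac {
    public static void main(String[] args) {
        // Runs javac inside the current JVM, so a debugger attached to this
        // process can stop inside the compiler itself.
        JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
        int result = javac.run(null, null, null, "/path/to/Hello.java");
        System.out.println("javac exit code: " + result);
    }
}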

diff --git "a/2022/12/21/java-动态代理/index.html" (new file)

java dynamic proxies

diff --git a/2022/12/22/java-arraycopy/index.html (new file)

java arraycopy

Background

Understand how arraycopy is implemented, and whether it is a shallow or a deep copy.

arrays

Before looking at arraycopy, look at arrays themselves.

The JLS describes them in detail:
In the Java programming language, arrays are objects (§4.3.1), are dynamically created
An array object contains a number of variables. The number of variables may be zero, in which case the array is said to be empty. The variables contained in an array have no names; instead they are referenced by array access expressions that use non-negative integer index values. These variables are called the components of the array. If an array has n components, we say n is the length of the array; the components of the array are referenced using integer indices from 0 to n - 1, inclusive.
+ +

So arrays:

• are objects
• hold a number of variables (their components)

Variables
A variable is a storage location and has an associated type, sometimes called its compile-time type, that is either a primitive type (§4.2) or a reference type (§4.3).

A variable's value is changed by an assignment (§15.26) or by a prefix or postfix ++ (increment) or -- (decrement) operator (§15.14.2, §15.14.3, §15.15.1, §15.15.2).
A variable has two aspects:

• a type
  • primitive type
  • reference type
• a value
  • primitive value
  • reference value
4.3.1 Objects
An object is a class instance or an array.
The reference values (often just references) are pointers to these objects, and a
special null reference, which refers to no object.
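Since the components of an object array are reference values, System.arraycopy copies references, not the objects they point to; a quick check:

public class ArrayCopyDemo {
    public static void main(String[] args) {
        StringBuilder[] src = { new StringBuilder("a"), new StringBuilder("b") };
        StringBuilder[] dst = new StringBuilder[2];

        System.arraycopy(src, 0, dst, 0, src.length);

        // The same reference ends up in both arrays: a shallow copy.
        System.out.println(src[0] == dst[0]);   // true
        src[0].append("!");
        System.out.println(dst[0]);             // "a!" -- the shared object changed
    }
}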
The native implementation (HotSpot)

src\hotspot\share\oops\objArrayKlass.cpp
void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
int dst_pos, int length, TRAPS) {
assert(s->is_objArray(), "must be obj array");

if (!d->is_objArray()) {
ResourceMark rm(THREAD);
stringStream ss;
if (d->is_typeArray()) {
ss.print("arraycopy: type mismatch: can not copy object array[] into %s[]",
type2name_tab[ArrayKlass::cast(d->klass())->element_type()]);
} else {
ss.print("arraycopy: destination type %s is not an array", d->klass()->external_name());
}
THROW_MSG(vmSymbols::java_lang_ArrayStoreException(), ss.as_string());
}

// Check is all offsets and lengths are non negative
if (src_pos < 0 || dst_pos < 0 || length < 0) {
// Pass specific exception reason.
ResourceMark rm(THREAD);
stringStream ss;
if (src_pos < 0) {
ss.print("arraycopy: source index %d out of bounds for object array[%d]",
src_pos, s->length());
} else if (dst_pos < 0) {
ss.print("arraycopy: destination index %d out of bounds for object array[%d]",
dst_pos, d->length());
} else {
ss.print("arraycopy: length %d is negative", length);
}
THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
}
// Check if the ranges are valid
if ((((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) ||
(((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length())) {
// Pass specific exception reason.
ResourceMark rm(THREAD);
stringStream ss;
if (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) {
ss.print("arraycopy: last source index %u out of bounds for object array[%d]",
(unsigned int) length + (unsigned int) src_pos, s->length());
} else {
ss.print("arraycopy: last destination index %u out of bounds for object array[%d]",
(unsigned int) length + (unsigned int) dst_pos, d->length());
}
THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
}

// Special case. Boundary cases must be checked first
// This allows the following call: copy_array(s, s.length(), d.length(), 0).
// This is correct, since the position is supposed to be an 'in between point', i.e., s.length(),
// points to the right of the last element.
if (length==0) {
return;
}
if (UseCompressedOops) {
size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(src_pos);
size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(dst_pos);
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(s, src_offset, NULL) ==
objArrayOop(s)->obj_at_addr<narrowOop>(src_pos), "sanity");
assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(d, dst_offset, NULL) ==
objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos), "sanity");
do_copy(s, src_offset, d, dst_offset, length, CHECK);
} else {
size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos);
size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos);
assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
do_copy(s, src_offset, d, dst_offset, length, CHECK);
}
}
(gdb) bt
#0 Copy::conjoint_oops_atomic (count=6, to=0x62a434ee8, from=0x62a42f220) at /home/dai/jdk/src/hotspot/share/utilities/copy.hpp:164
#1 AccessInternal::arraycopy_conjoint_oops (src=0x62a42f220, dst=0x62a434ee8, length=6) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.cpp:94
#2 0x00007ffff7067317 in RawAccessBarrierArrayCopy::arraycopy<18112614ul, narrowOop> (length=6, dst_raw=<optimized out>, dst_offset_in_bytes=0, dst_obj=..., src_raw=0x62a42f220, src_offset_in_bytes=0,
src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:270
#3 RawAccessBarrier<18112614ul>::arraycopy<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=0, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=0, src_obj=...)
at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:344
#4 RawAccessBarrier<18112614ul>::oop_arraycopy<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=0, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=0, src_obj=...)
at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:128
#5 ModRefBarrierSet::AccessBarrier<18112614ul, G1BarrierSet>::oop_arraycopy_in_heap<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=<optimized out>, dst_obj=...,
src_raw=<optimized out>, src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp:108
#6 AccessInternal::PostRuntimeDispatch<G1BarrierSet::AccessBarrier<18112614ul, G1BarrierSet>, (AccessInternal::BarrierType)8, 18112614ul>::oop_access_barrier<HeapWordImpl*> (src_obj=...,
src_offset_in_bytes=<optimized out>, src_raw=<optimized out>, dst_obj=..., dst_offset_in_bytes=<optimized out>, dst_raw=<optimized out>, length=6)
at /home/dai/jdk/src/hotspot/share/oops/access.inline.hpp:142
#7 0x00007ffff7063f43 in AccessInternal::RuntimeDispatch<18112582ul, HeapWordImpl*, (AccessInternal::BarrierType)8>::arraycopy_init (src_obj=..., src_offset_in_bytes=16, src_raw=0x0, dst_obj=...,
dst_offset_in_bytes=<optimized out>, dst_raw=<optimized out>, length=<optimized out>) at /home/dai/jdk/src/hotspot/share/oops/access.inline.hpp:339
#8 0x00007ffff7061a0e in AccessInternal::RuntimeDispatch<18112582ul, HeapWordImpl*, (AccessInternal::BarrierType)8>::arraycopy (length=<optimized out>, dst_raw=<optimized out>,
dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:554
#9 AccessInternal::PreRuntimeDispatch::arraycopy<18112582ul, HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:907
#10 AccessInternal::arraycopy_reduce_types<18112580ul> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:1054
#11 AccessInternal::arraycopy<18087940ul, HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:1208
#12 Access<18087936ul>::oop_arraycopy<HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/access.hpp:137
#13 ArrayAccess<16777216ul>::oop_arraycopy (length=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_offset_in_bytes=<optimized out>, src_obj=...)
at /home/dai/jdk/src/hotspot/share/oops/access.hpp:323
#14 ObjArrayKlass::do_copy (this=this@entry=0x800058a00, s=..., src_offset=src_offset@entry=16, d=..., dst_offset=dst_offset@entry=16, length=length@entry=6, __the_thread__=0x7ffff0028f20)
at /home/dai/jdk/src/hotspot/share/oops/objArrayKlass.cpp:213
#15 0x00007ffff7062e33 in ObjArrayKlass::copy_array (this=0x800058a00, s=..., src_pos=<optimized out>, d=..., dst_pos=<optimized out>, length=6, __the_thread__=0x7ffff0028f20)
at /home/dai/jdk/src/hotspot/share/oops/oopsHierarchy.hpp:85
#16 0x00007ffff6b2d75f in JVM_ArrayCopy (env=<optimized out>, ignored=<optimized out>, src=<optimized out>, src_pos=0, dst=<optimized out>, dst_pos=0, length=6)
at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:298

The x86-64 assembly lives in src/hotspot/os_cpu/linux_x86/linux_x86_64.S:
        # Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
# jlong* to,
# size_t count)
# Equivalent to
# conjoint_jlongs_atomic
# arrayof_conjoint_oops
# conjoint_oops_atomic
#
# rdi - from
# rsi - to
# rdx - count, treated as ssize_t
#
.p2align 4,,15
.type _Copy_arrayof_conjoint_jlongs,@function
.type _Copy_conjoint_jlongs_atomic,@function
_Copy_arrayof_conjoint_jlongs:
_Copy_conjoint_jlongs_atomic:
cmpq %rdi,%rsi
leaq -8(%rdi,%rdx,8),%rax # from + count*8 - 8
jbe acl_CopyRight
cmpq %rax,%rsi
jbe acl_CopyLeft
acl_CopyRight:
leaq -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
negq %rdx
jmp 3f
1: movq 8(%rax,%rdx,8),%rsi
movq %rsi,8(%rcx,%rdx,8)
addq $1,%rdx
jnz 1b
ret
.p2align 4,,15
2: movq -24(%rax,%rdx,8),%rsi
movq %rsi,-24(%rcx,%rdx,8)
movq -16(%rax,%rdx,8),%rsi
movq %rsi,-16(%rcx,%rdx,8)
movq -8(%rax,%rdx,8),%rsi
movq %rsi,-8(%rcx,%rdx,8)
movq (%rax,%rdx,8),%rsi
movq %rsi,(%rcx,%rdx,8)
3: addq $4,%rdx
jle 2b
subq $4,%rdx
jl 1b
ret
4: movq -8(%rdi,%rdx,8),%rcx
movq %rcx,-8(%rsi,%rdx,8)
subq $1,%rdx
jnz 4b
ret
.p2align 4,,15
5: movq 24(%rdi,%rdx,8),%rcx
movq %rcx,24(%rsi,%rdx,8)
movq 16(%rdi,%rdx,8),%rcx
movq %rcx,16(%rsi,%rdx,8)
movq 8(%rdi,%rdx,8),%rcx
movq %rcx,8(%rsi,%rdx,8)
movq (%rdi,%rdx,8),%rcx
movq %rcx,(%rsi,%rdx,8)
acl_CopyLeft:
subq $4,%rdx
jge 5b
addq $4,%rdx
jg 4b
ret
The jz / jne targets here end up as relative offsets; let's assemble the file and look at the generated machine code.

as assembles the source into an object file:
$ as src/hotspot/os_cpu/linux_x86/linux_x86_64.S
$ objdump -S a.out

a.out: file format elf64-x86-64


Disassembly of section .text:

0000000000000000 <SpinPause>:
0: f3 90 pause
2: 48 c7 c0 01 00 00 00 mov $0x1,%rax
9: c3 ret
a: 66 0f 1f 44 00 00 nopw 0x0(%rax,%rax,1)

0000000000000010 <_Copy_arrayof_conjoint_bytes>:
10: 49 89 d0 mov %rdx,%r8
13: 48 c1 ea 03 shr $0x3,%rdx
17: 48 39 fe cmp %rdi,%rsi
1a: 4a 8d 44 07 ff lea -0x1(%rdi,%r8,1),%rax
1f: 76 09 jbe 2a <acb_CopyRight>
21: 48 39 c6 cmp %rax,%rsi
24: 0f 86 9e 00 00 00 jbe c8 <acb_CopyLeft>

000000000000002a <acb_CopyRight>:
2a: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
2f: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
34: 48 f7 da neg %rdx
37: eb 7d jmp b6 <acb_CopyRight+0x8c>
39: 0f 1f 80 00 00 00 00 nopl 0x0(%rax)
40: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
45: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
4a: 48 83 c2 01 add $0x1,%rdx
4e: 75 f0 jne 40 <acb_CopyRight+0x16>
50: 49 f7 c0 04 00 00 00 test $0x4,%r8
57: 74 0e je 67 <acb_CopyRight+0x3d>
59: 8b 70 08 mov 0x8(%rax),%esi
5c: 89 71 08 mov %esi,0x8(%rcx)
5f: 48 83 c0 04 add $0x4,%rax
63: 48 83 c1 04 add $0x4,%rcx
67: 49 f7 c0 02 00 00 00 test $0x2,%r8
6e: 74 0c je 7c <acb_CopyRight+0x52>
70: 66 8b 70 08 mov 0x8(%rax),%si
74: 66 89 71 08 mov %si,0x8(%rcx)
78: 48 83 c1 02 add $0x2,%rcx
7c: 49 f7 c0 01 00 00 00 test $0x1,%r8
83: 74 08 je 8d <acb_CopyRight+0x63>
85: 42 8a 44 07 ff mov -0x1(%rdi,%r8,1),%al
8a: 88 41 08 mov %al,0x8(%rcx)
8d: c3 ret
8e: 66 90 xchg %ax,%ax
90: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
95: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
9a: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
9f: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
a4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
a9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
ae: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
b2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
b6: 48 83 c2 04 add $0x4,%rdx
ba: 7e d4 jle 90 <acb_CopyRight+0x66>
bc: 48 83 ea 04 sub $0x4,%rdx
c0: 0f 8c 7a ff ff ff jl 40 <acb_CopyRight+0x16>
c6: eb 88 jmp 50 <acb_CopyRight+0x26>

00000000000000c8 <acb_CopyLeft>:
c8: 49 f7 c0 01 00 00 00 test $0x1,%r8
cf: 74 0e je df <acb_CopyLeft+0x17>
d1: 42 8a 4c 07 ff mov -0x1(%rdi,%r8,1),%cl
d6: 42 88 4c 06 ff mov %cl,-0x1(%rsi,%r8,1)
db: 49 83 e8 01 sub $0x1,%r8
df: 49 f7 c0 02 00 00 00 test $0x2,%r8
e6: 74 0c je f4 <acb_CopyLeft+0x2c>
e8: 66 42 8b 4c 07 fe mov -0x2(%rdi,%r8,1),%cx
ee: 66 42 89 4c 06 fe mov %cx,-0x2(%rsi,%r8,1)
f4: 49 f7 c0 04 00 00 00 test $0x4,%r8
fb: 74 59 je 156 <acb_CopyLeft+0x8e>
fd: 8b 0c d7 mov (%rdi,%rdx,8),%ecx
100: 89 0c d6 mov %ecx,(%rsi,%rdx,8)
103: eb 51 jmp 156 <acb_CopyLeft+0x8e>
105: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
10c: 00 00 00 00
110: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
115: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
11a: 48 83 ea 01 sub $0x1,%rdx
11e: 75 f0 jne 110 <acb_CopyLeft+0x48>
120: c3 ret
121: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
128: 00 00 00 00
12c: 0f 1f 40 00 nopl 0x0(%rax)
130: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
135: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
13a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
13f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
144: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
149: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
14e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
152: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
156: 48 83 ea 04 sub $0x4,%rdx
15a: 7d d4 jge 130 <acb_CopyLeft+0x68>
15c: 48 83 c2 04 add $0x4,%rdx
160: 7f ae jg 110 <acb_CopyLeft+0x48>
162: c3 ret
163: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
16a: 00 00 00 00
16e: 66 90 xchg %ax,%ax

0000000000000170 <_Copy_arrayof_conjoint_jshorts>:
170: 49 89 d0 mov %rdx,%r8
173: 48 c1 ea 02 shr $0x2,%rdx
177: 48 39 fe cmp %rdi,%rsi
17a: 4a 8d 44 47 fe lea -0x2(%rdi,%r8,2),%rax
17f: 76 05 jbe 186 <acs_CopyRight>
181: 48 39 c6 cmp %rax,%rsi
184: 76 7e jbe 204 <acs_CopyLeft>

0000000000000186 <acs_CopyRight>:
186: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
18b: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
190: 48 f7 da neg %rdx
193: eb 61 jmp 1f6 <acs_CopyRight+0x70>
195: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
19a: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
19f: 48 83 c2 01 add $0x1,%rdx
1a3: 75 f0 jne 195 <acs_CopyRight+0xf>
1a5: 49 f7 c0 02 00 00 00 test $0x2,%r8
1ac: 74 0a je 1b8 <acs_CopyRight+0x32>
1ae: 8b 70 08 mov 0x8(%rax),%esi
1b1: 89 71 08 mov %esi,0x8(%rcx)
1b4: 48 83 c1 04 add $0x4,%rcx
1b8: 49 f7 c0 01 00 00 00 test $0x1,%r8
1bf: 74 0a je 1cb <acs_CopyRight+0x45>
1c1: 66 42 8b 74 47 fe mov -0x2(%rdi,%r8,2),%si
1c7: 66 89 71 08 mov %si,0x8(%rcx)
1cb: c3 ret
1cc: 0f 1f 40 00 nopl 0x0(%rax)
1d0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
1d5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
1da: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
1df: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
1e4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
1e9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
1ee: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
1f2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
1f6: 48 83 c2 04 add $0x4,%rdx
1fa: 7e d4 jle 1d0 <acs_CopyRight+0x4a>
1fc: 48 83 ea 04 sub $0x4,%rdx
200: 7c 93 jl 195 <acs_CopyRight+0xf>
202: eb a1 jmp 1a5 <acs_CopyRight+0x1f>

0000000000000204 <acs_CopyLeft>:
204: 49 f7 c0 01 00 00 00 test $0x1,%r8
20b: 74 0c je 219 <acs_CopyLeft+0x15>
20d: 66 42 8b 4c 47 fe mov -0x2(%rdi,%r8,2),%cx
213: 66 42 89 4c 46 fe mov %cx,-0x2(%rsi,%r8,2)
219: 49 f7 c0 02 00 00 00 test $0x2,%r8
220: 74 44 je 266 <acs_CopyLeft+0x62>
222: 8b 0c d7 mov (%rdi,%rdx,8),%ecx
225: 89 0c d6 mov %ecx,(%rsi,%rdx,8)
228: eb 3c jmp 266 <acs_CopyLeft+0x62>
22a: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
22f: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
234: 48 83 ea 01 sub $0x1,%rdx
238: 75 f0 jne 22a <acs_CopyLeft+0x26>
23a: c3 ret
23b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
240: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
245: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
24a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
24f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
254: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
259: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
25e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
262: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
266: 48 83 ea 04 sub $0x4,%rdx
26a: 7d d4 jge 240 <acs_CopyLeft+0x3c>
26c: 48 83 c2 04 add $0x4,%rdx
270: 7f b8 jg 22a <acs_CopyLeft+0x26>
272: c3 ret
273: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
27a: 00 00 00 00
27e: 66 90 xchg %ax,%ax

0000000000000280 <_Copy_arrayof_conjoint_jints>:
280: 49 89 d0 mov %rdx,%r8
283: 48 d1 ea shr %rdx
286: 48 39 fe cmp %rdi,%rsi
289: 4a 8d 44 87 fc lea -0x4(%rdi,%r8,4),%rax
28e: 76 05 jbe 295 <aci_CopyRight>
290: 48 39 c6 cmp %rax,%rsi
293: 76 6f jbe 304 <aci_CopyLeft>

0000000000000295 <aci_CopyRight>:
295: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
29a: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
29f: 48 f7 da neg %rdx
2a2: eb 52 jmp 2f6 <aci_CopyRight+0x61>
2a4: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
2ab: 00 00 00 00
2af: 90 nop
2b0: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
2b5: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
2ba: 48 83 c2 01 add $0x1,%rdx
2be: 75 f0 jne 2b0 <aci_CopyRight+0x1b>
2c0: 49 f7 c0 01 00 00 00 test $0x1,%r8
2c7: 74 06 je 2cf <aci_CopyRight+0x3a>
2c9: 8b 70 08 mov 0x8(%rax),%esi
2cc: 89 71 08 mov %esi,0x8(%rcx)
2cf: c3 ret
2d0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
2d5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
2da: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
2df: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
2e4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
2e9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
2ee: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
2f2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
2f6: 48 83 c2 04 add $0x4,%rdx
2fa: 7e d4 jle 2d0 <aci_CopyRight+0x3b>
2fc: 48 83 ea 04 sub $0x4,%rdx
300: 7c ae jl 2b0 <aci_CopyRight+0x1b>
302: eb bc jmp 2c0 <aci_CopyRight+0x2b>

0000000000000304 <aci_CopyLeft>:
304: 49 f7 c0 01 00 00 00 test $0x1,%r8
30b: 74 49 je 356 <aci_CopyLeft+0x52>
30d: 42 8b 4c 87 fc mov -0x4(%rdi,%r8,4),%ecx
312: 42 89 4c 86 fc mov %ecx,-0x4(%rsi,%r8,4)
317: eb 3d jmp 356 <aci_CopyLeft+0x52>
319: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
31e: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
323: 48 83 ea 01 sub $0x1,%rdx
327: 75 f0 jne 319 <aci_CopyLeft+0x15>
329: c3 ret
32a: 66 0f 1f 44 00 00 nopw 0x0(%rax,%rax,1)
330: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
335: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
33a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
33f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
344: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
349: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
34e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
352: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
356: 48 83 ea 04 sub $0x4,%rdx
35a: 7d d4 jge 330 <aci_CopyLeft+0x2c>
35c: 48 83 c2 04 add $0x4,%rdx
360: 7f b7 jg 319 <aci_CopyLeft+0x15>
362: c3 ret
363: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
36a: 00 00 00 00
36e: 66 90 xchg %ax,%ax

0000000000000370 <_Copy_arrayof_conjoint_jlongs>:
370: 48 39 fe cmp %rdi,%rsi
373: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
378: 76 09 jbe 383 <acl_CopyRight>
37a: 48 39 c6 cmp %rax,%rsi
37d: 0f 86 93 00 00 00 jbe 416 <acl_CopyLeft>

0000000000000383 <acl_CopyRight>:
383: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
388: 48 f7 da neg %rdx
38b: eb 39 jmp 3c6 <acl_CopyRight+0x43>
38d: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
392: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
397: 48 83 c2 01 add $0x1,%rdx
39b: 75 f0 jne 38d <acl_CopyRight+0xa>
39d: c3 ret
39e: 66 90 xchg %ax,%ax
3a0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
3a5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
3aa: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
3af: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
3b4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
3b9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
3be: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
3c2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
3c6: 48 83 c2 04 add $0x4,%rdx
3ca: 7e d4 jle 3a0 <acl_CopyRight+0x1d>
3cc: 48 83 ea 04 sub $0x4,%rdx
3d0: 7c bb jl 38d <acl_CopyRight+0xa>
3d2: c3 ret
3d3: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
3d8: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
3dd: 48 83 ea 01 sub $0x1,%rdx
3e1: 75 f0 jne 3d3 <acl_CopyRight+0x50>
3e3: c3 ret
3e4: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
3eb: 00 00 00 00
3ef: 90 nop
3f0: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
3f5: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
3fa: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
3ff: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
404: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
409: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
40e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
412: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)

0000000000000416 <acl_CopyLeft>:
416: 48 83 ea 04 sub $0x4,%rdx
41a: 7d d4 jge 3f0 <acl_CopyRight+0x6d>
41c: 48 83 c2 04 add $0x4,%rdx
420: 7f b1 jg 3d3 <acl_CopyRight+0x50>
422: c3 ret

Summary

An array holds a group of variables, and each variable holds one of two kinds of values: a primitive value or a reference value.


So copying an array copies those variables; for an object array that means the reference values are copied (a shallow copy), not the objects they point to — see the small example after the quoted Javadoc below.

Copies an array from the specified source array, beginning at the specified position, to the specified position of the destination array. A subsequence of array components are copied from the source array referenced by src to the destination array referenced by dest. The number of components copied is equal to the length argument. The components at positions srcPos through srcPos+length-1 in the source array are copied into positions destPos through destPos+length-1, respectively, of the destination array.
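A minimal Java sketch of the point above (the class name is just for illustration): System.arraycopy duplicates the reference values, so both arrays end up pointing at the same objects.

public class ArrayCopyDemo {
    public static void main(String[] args) {
        StringBuilder[] src = { new StringBuilder("a"), new StringBuilder("b") };
        StringBuilder[] dst = new StringBuilder[src.length];

        // Copies the two reference values, not the StringBuilder objects themselves.
        System.arraycopy(src, 0, dst, 0, src.length);

        System.out.println(src[0] == dst[0]); // true: both slots refer to the same object
        src[0].append("!");
        System.out.println(dst[0]);           // prints "a!" - the change is visible via dst
    }
}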

Related reading

diff --git "a/2022/12/22/nacos-\350\216\267\345\217\226\351\205\215\347\275\256/index.html" "b/2022/12/22/nacos-\350\216\267\345\217\226\351\205\215\347\275\256/index.html" (new file)

nacos 获取配置 (fetching configuration from Nacos)

Background

Nacos is a service registration/discovery and configuration middleware.
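As a reference point for the stack trace below, here is a minimal, hedged sketch of fetching a configuration through the Nacos Java client; the server address, dataId and group are placeholder values.

import java.util.Properties;
import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;

public class NacosConfigDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("serverAddr", "127.0.0.1:8848"); // placeholder Nacos address

        // createConfigService reflectively constructs NacosConfigService,
        // the <init>:81, NacosConfigService frame in the stack trace below.
        ConfigService configService = NacosFactory.createConfigService(props);

        // Pull one configuration item (dataId/group are placeholders).
        String content = configService.getConfig("demo-app.yaml", "DEFAULT_GROUP", 5000);
        System.out.println(content);
    }
}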


Stack trace for fetching a configuration

run:744, ClientWorker$ConfigRpcTransportClient$4 (com.alibaba.nacos.client.config.impl)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
startInternal:739, ClientWorker$ConfigRpcTransportClient (com.alibaba.nacos.client.config.impl)
start:255, ConfigTransportClient (com.alibaba.nacos.client.config.impl)
<init>:472, ClientWorker (com.alibaba.nacos.client.config.impl)
<init>:81, NacosConfigService (com.alibaba.nacos.client.config)
newInstance0:-2, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
newInstance:490, Constructor (java.lang.reflect)
createConfigService:43, ConfigFactory (com.alibaba.nacos.api.config)
createConfigService:44, NacosFactory (com.alibaba.nacos.api)
createConfigService:55, NacosConfigManager (com.alibaba.cloud.nacos)
<init>:43, NacosConfigManager (com.alibaba.cloud.nacos)
nacosConfigManager:43, NacosConfigBootstrapConfiguration (com.alibaba.cloud.nacos)
invoke0:-2, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
addCandidateEntry:1525, DefaultListableBeanFactory (org.springframework.beans.factory.support)
findAutowireCandidates:1489, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveMultipleBeans:1378, DefaultListableBeanFactory (org.springframework.beans.factory.support)
doResolveDependency:1265, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
run:140, SpringApplicationBuilder (org.springframework.boot.builder)
bootstrapServiceContext:212, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
onApplicationEvent:117, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
onApplicationEvent:74, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
doInvokeListener:172, SimpleApplicationEventMulticaster (org.springframework.context.event)
invokeListener:165, SimpleApplicationEventMulticaster (org.springframework.context.event)
multicastEvent:139, SimpleApplicationEventMulticaster (org.springframework.context.event)
multicastEvent:127, SimpleApplicationEventMulticaster (org.springframework.context.event)
environmentPrepared:80, EventPublishingRunListener (org.springframework.boot.context.event)
environmentPrepared:53, SpringApplicationRunListeners (org.springframework.boot)
prepareEnvironment:342, SpringApplication (org.springframework.boot)
run:307, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
diff --git a/2022/12/22/rabbitmq-spring-boot/index.html b/2022/12/22/rabbitmq-spring-boot/index.html (new file)

rabbitmq spring boot

Background

Understand how Spring Boot loads its configuration and how it reads the RabbitMQ settings (the sketch below shows where those settings end up).
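A hedged sketch (the component name is made up for illustration): Spring Boot binds the spring.rabbitmq.* keys from application.properties/yml onto RabbitProperties, the bean whose construction appears as <init>:47, RabbitProperties in the last stack trace below.

import org.springframework.boot.autoconfigure.amqp.RabbitProperties;
import org.springframework.stereotype.Component;

// Hypothetical probe bean that only exists to look at the bound values.
@Component
public class RabbitConfigProbe {

    private final RabbitProperties props;

    public RabbitConfigProbe(RabbitProperties props) {
        this.props = props;
    }

    public String describe() {
        // host/port/username come from spring.rabbitmq.* (or the defaults).
        return props.getHost() + ":" + props.getPort() + " as " + props.getUsername();
    }
}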


Stack traces

Spring Boot applying the configuration

configure:40, SimpleRabbitListenerContainerFactoryConfigurer (org.springframework.boot.autoconfigure.amqp)
simpleRabbitListenerContainerFactory:81, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 1555928242 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

Setting the properties

configure:40, SimpleRabbitListenerContainerFactoryConfigurer (org.springframework.boot.autoconfigure.amqp)
simpleRabbitListenerContainerFactory:81, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 622043416 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

Injecting the RabbitMQ configuration

<init>:57, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
newInstance0:-1, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
newInstance:490, Constructor (java.lang.reflect)
instantiateClass:204, BeanUtils (org.springframework.beans)
instantiate:117, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiate:310, ConstructorResolver (org.springframework.beans.factory.support)
autowireConstructor:295, ConstructorResolver (org.springframework.beans.factory.support)
autowireConstructor:1361, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1208, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 809260538 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

Initializing the default RabbitProperties configuration

<init>:47, RabbitProperties (org.springframework.boot.autoconfigure.amqp)
newInstance0:-1, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
newInstance:490, Constructor (java.lang.reflect)
instantiateClass:204, BeanUtils (org.springframework.beans)
instantiate:87, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiateBean:1315, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1218, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:207, AbstractBeanFactory (org.springframework.beans.factory.support)
resolveBeanByName:453, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
autowireResource:527, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
getResource:497, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
getResourceToInject:650, CommonAnnotationBeanPostProcessor$ResourceElement (org.springframework.context.annotation)
inject:228, InjectionMetadata$InjectedElement (org.springframework.beans.factory.annotation)
inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
postProcessProperties:318, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

Related reading

diff --git a/2022/12/26/cms-gc/index.html b/2022/12/26/cms-gc/index.html (new file)

cms gc

Background

Get an overview of Java's CMS GC algorithm.
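A small Java check, assuming the program runs on a JDK that still ships CMS (JDK 8-13) started with -XX:+UseConcMarkSweepGC: it lists the collectors actually in use, which for CMS are typically ParNew plus ConcurrentMarkSweep.

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class GcInfo {
    public static void main(String[] args) {
        // Prints the garbage collectors the running JVM uses and their collection counts.
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            System.out.println(gc.getName() + ", collections=" + gc.getCollectionCount());
        }
    }
}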


Code path

src/hotspot/share/gc/serial/defNewGeneration.cpp
// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif // #ifndef PRODUCT

  // Reserve space for the object in this (older) generation ...
  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    // ... and copy the object word by word into the reserved space.
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
    return cast_to_oop(result);
  } else {
    // Promotion failed (old generation full); let the heap handle the failure.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

Related reading

diff --git "a/2022/12/26/java\345\237\272\347\241\200/index.html" "b/2022/12/26/java\345\237\272\347\241\200/index.html" (new file)

java基础 (Java basics)

• View the JVM's default options:
$ java -XX:+PrintCommandLineFlags -version 
-XX:ConcGCThreads=3 -XX:G1ConcRefinementThreads=10 -XX:GCDrainStackTargetSize=64 -XX:InitialHeapSize=525168320 -XX:MarkStackSize=4194304 -XX:MaxHeapSize=8402693120 -XX:MinHeapSize=6815736 -XX:+PrintCommandLineFlags -XX:ReservedCodeCacheSize=251658240 -XX:+SegmentedCodeCache -XX:+UseCompressedClassPointers -XX:+UseCompressedOops -XX:+UseG1GC
openjdk version "17.0.5" 2022-10-18
OpenJDK Runtime Environment (build 17.0.5+8-Ubuntu-2ubuntu122.04)
OpenJDK 64-Bit Server VM (build 17.0.5+8-Ubuntu-2ubuntu122.04, mixed mode, sharing)

java io

Java I/O is split into byte streams and character streams:
byte streams are InputStream/OutputStream;
character streams are Reader/Writer.


Read/write direction:

Class          Method (read/write)   Unit (byte/char)
InputStream    read                  byte
OutputStream   write                 byte
Reader         read                  char
Writer         write                 char
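A minimal sketch of the table above (demo.txt is a hypothetical file): a byte stream reads raw bytes, while a character stream decodes bytes into chars through a charset.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;

public class IoDemo {
    public static void main(String[] args) throws IOException {
        // Byte stream: read() returns one raw byte (0-255) or -1 at end of stream.
        try (InputStream in = new FileInputStream("demo.txt")) {
            System.out.println(in.read());
        }

        // Character stream: InputStreamReader decodes bytes into chars using UTF-8.
        try (Reader reader = new InputStreamReader(
                new FileInputStream("demo.txt"), StandardCharsets.UTF_8)) {
            System.out.println((char) reader.read());
        }
    }
}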

Related reading

diff --git a/2022/12/26/thread-pool/index.html b/2022/12/26/thread-pool/index.html (new file)

thread pool

Background

A thread pool is a collection of worker threads. In Java, thread pools are mainly used through the following types (a short example follows the list):

• ExecutorService
• ThreadPoolExecutor
• Executor
• Executors
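A minimal sketch of the usual pattern, assuming nothing beyond the JDK: Executors builds an ExecutorService (backed by a ThreadPoolExecutor), which runs submitted tasks.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class PoolDemo {
    public static void main(String[] args) throws Exception {
        // Executors is the factory; the returned ExecutorService is a ThreadPoolExecutor.
        ExecutorService pool = Executors.newFixedThreadPool(4);

        Future<Integer> result = pool.submit(() -> 21 * 2); // submit a Callable task
        System.out.println(result.get());                   // 42

        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}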

Related reading

diff --git a/2022/12/27/maven-scope/index.html b/2022/12/27/maven-scope/index.html (new file)

maven scope

Background

Get familiar with how Maven dependency scopes are used.

• compile: the default scope; needed at runtime and packaged into the artifact
• provided: needed at compile time; supplied by the runtime environment, so not packaged
• runtime: not needed for compilation, needed at runtime, and packaged (keeps interface and implementation separate)
• test: needed only for tests; not packaged
• system: a jar taken from a path on the local system rather than a repository (rarely used)

Usage

diff --git "a/2022/12/29/redission-\350\260\203\347\224\250\346\265\201\347\250\213/index.html" "b/2022/12/29/redission-\350\260\203\347\224\250\346\265\201\347\250\213/index.html" (new file)

redission 调用流程 (Redisson call flow)

Background

Redis operations in Java here go through the Redisson client library; the stack traces below come from simple bucket set/get calls like the sketch that follows.
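A minimal, hedged Redisson sketch (the Redis address and key are placeholders; the traces below happen to use cluster mode, while this uses a single server): RBucket.set/get are the calls that show up as RedissonBucket.set:183 and RedissonBucket.get:135 in the stack traces.

import org.redisson.Redisson;
import org.redisson.api.RBucket;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public class RedissonDemo {
    public static void main(String[] args) {
        Config config = new Config();
        config.useSingleServer().setAddress("redis://127.0.0.1:6379"); // placeholder address

        RedissonClient client = Redisson.create(config);   // Redisson.create in the stack
        try {
            RBucket<String> bucket = client.getBucket("demo-key"); // placeholder key
            bucket.set("hello");                                   // RedissonBucket.set
            System.out.println(bucket.get());                      // RedissonBucket.get
        } finally {
            client.shutdown();
        }
    }
}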


A simple stack trace


Usage

The core stack trace

encode:132, CommandEncoder (org.redisson.client.handler)
encode:99, CommandEncoder (org.redisson.client.handler)
encode:55, CommandEncoder (org.redisson.client.handler)
write:107, MessageToByteEncoder (io.netty.handler.codec)
write:75, CommandEncoder (org.redisson.client.handler)
invokeWrite0:881, AbstractChannelHandlerContext (io.netty.channel)
invokeWrite:863, AbstractChannelHandlerContext (io.netty.channel)
write:968, AbstractChannelHandlerContext (io.netty.channel)
write:856, AbstractChannelHandlerContext (io.netty.channel)
write:120, MessageToByteEncoder (io.netty.handler.codec)
write:45, CommandBatchEncoder (org.redisson.client.handler)
invokeWrite0:881, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
write:966, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
write:83, CommandsQueue (org.redisson.client.handler)
invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
- Async stack trace
addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
write:972, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
writeAndFlush:306, AbstractChannel (io.netty.channel)
send:206, RedisConnection (org.redisson.client)
sendCommand:590, RedisExecutor (org.redisson.command)
lambda$execute$3:164, RedisExecutor (org.redisson.command)
uniWhenComplete:859, CompletableFuture (java.util.concurrent)
uniWhenCompleteStage:883, CompletableFuture (java.util.concurrent)
whenComplete:2251, CompletableFuture (java.util.concurrent)
execute:149, RedisExecutor (org.redisson.command)
async:526, CommandAsyncService (org.redisson.command)
writeAsync:490, CommandAsyncService (org.redisson.command)
setAsync:192, RedissonBucket (org.redisson)
set:183, RedissonBucket (org.redisson)
main:25, App (com.demo.redission)

Memory allocation

allocateMemory:608, Unsafe (jdk.internal.misc)
<init>:122, DirectByteBuffer (java.nio)
allocateDirect:317, ByteBuffer (java.nio)
<clinit>:93, PlatformDependent0 (io.netty.util.internal)
isAndroid:333, PlatformDependent (io.netty.util.internal)
<clinit>:88, PlatformDependent (io.netty.util.internal)
<clinit>:84, NioEventLoop (io.netty.channel.nio)
newChild:182, NioEventLoopGroup (io.netty.channel.nio)
newChild:38, NioEventLoopGroup (io.netty.channel.nio)
<init>:84, MultithreadEventExecutorGroup (io.netty.util.concurrent)
<init>:60, MultithreadEventExecutorGroup (io.netty.util.concurrent)
<init>:49, MultithreadEventExecutorGroup (io.netty.util.concurrent)
<init>:59, MultithreadEventLoopGroup (io.netty.channel)
<init>:87, NioEventLoopGroup (io.netty.channel.nio)
<init>:82, NioEventLoopGroup (io.netty.channel.nio)
<init>:69, NioEventLoopGroup (io.netty.channel.nio)
<init>:181, MasterSlaveConnectionManager (org.redisson.connection)
<init>:73, ClusterConnectionManager (org.redisson.cluster)
createConnectionManager:196, ConfigSupport (org.redisson.config)
<init>:68, Redisson (org.redisson)
create:109, Redisson (org.redisson)
main:24, App (com.demo.redission)

ByteBuf write

Stack trace

putByte:704, PlatformDependent0 (io.netty.util.internal)
putByte:719, PlatformDependent (io.netty.util.internal)
unsafeWriteUtf8:1020, ByteBufUtil (io.netty.buffer)
writeUtf8:836, ByteBufUtil (io.netty.buffer)
writeUtf8:820, ByteBufUtil (io.netty.buffer)
setCharSequence0:707, AbstractByteBuf (io.netty.buffer)
writeCharSequence:1187, AbstractByteBuf (io.netty.buffer)
encode:45, StringCodec$1 (org.redisson.client.codec)
encode:622, CommandAsyncService (org.redisson.command)
encode:313, RedissonObject (org.redisson)
setAsync:192, RedissonBucket (org.redisson)
set:183, RedissonBucket (org.redisson)
testApp:57, AppTest (com.demo.redission)
invokeVirtual:-1, LambdaForm$DMH/0x0000000800c0c400 (java.lang.invoke)
invoke:-1, LambdaForm$MH/0x0000000800c0d000 (java.lang.invoke)
invokeExact_MT:-1, Invokers$Holder (java.lang.invoke)
invokeImpl:154, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:577, Method (java.lang.reflect)
runTest:154, TestCase (junit.framework)
runBare:127, TestCase (junit.framework)
protect:106, TestResult$1 (junit.framework)
runProtected:124, TestResult (junit.framework)
run:109, TestResult (junit.framework)
run:118, TestCase (junit.framework)
doRun:116, TestRunner (junit.textui)
doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
doRun:109, TestRunner (junit.textui)
startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

ByteBuf ultimately writes each byte by calling UNSAFE.putByte(data, offset, value); a small standalone sketch follows.
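A small Netty-only sketch of that path (no Redisson involved; buffer size and contents are arbitrary): writeCharSequence on a direct ByteBuf goes through ByteBufUtil.writeUtf8, which for direct memory ends in the PlatformDependent/Unsafe putByte calls shown in the stack above.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

public class ByteBufDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.directBuffer(32);              // off-heap (direct) buffer
        buf.writeCharSequence("SET demo hello", StandardCharsets.UTF_8);
        System.out.println(buf.toString(StandardCharsets.UTF_8));
        buf.release();                                        // direct buffers must be released
    }
}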


Writing the ByteBuf to the socket

write:62, SocketDispatcher (sun.nio.ch)
writeFromNativeBuffer:137, IOUtil (sun.nio.ch)
write:81, IOUtil (sun.nio.ch)
write:58, IOUtil (sun.nio.ch)
write:532, SocketChannelImpl (sun.nio.ch)
doWrite:415, NioSocketChannel (io.netty.channel.socket.nio)
flush0:931, AbstractChannel$AbstractUnsafe (io.netty.channel)
flush0:354, AbstractNioChannel$AbstractNioUnsafe (io.netty.channel.nio)
flush:895, AbstractChannel$AbstractUnsafe (io.netty.channel)
flush:1372, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeFlush0:921, AbstractChannelHandlerContext (io.netty.channel)
invokeFlush:907, AbstractChannelHandlerContext (io.netty.channel)
flush:893, AbstractChannelHandlerContext (io.netty.channel)
flush:125, ChannelOutboundHandlerAdapter (io.netty.channel)
invokeFlush0:925, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:941, AbstractChannelHandlerContext (io.netty.channel)
write:966, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
write:83, CommandsQueue (org.redisson.client.handler)
invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
- Async stack trace
addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
write:972, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
writeAndFlush:306, AbstractChannel (io.netty.channel)
send:206, RedisConnection (org.redisson.client)
sendCommand:590, RedisExecutor (org.redisson.command)
lambda$execute$3:164, RedisExecutor (org.redisson.command)
uniWhenComplete:863, CompletableFuture (java.util.concurrent)
uniWhenCompleteStage:887, CompletableFuture (java.util.concurrent)
whenComplete:2325, CompletableFuture (java.util.concurrent)
execute:149, RedisExecutor (org.redisson.command)
async:526, CommandAsyncService (org.redisson.command)
readAsync:292, CommandAsyncService (org.redisson.command)
getAsync:140, RedissonBucket (org.redisson)
get:135, RedissonBucket (org.redisson)
testApp:59, AppTest (com.demo.redission)
invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:577, Method (java.lang.reflect)
runTest:154, TestCase (junit.framework)
runBare:127, TestCase (junit.framework)
protect:106, TestResult$1 (junit.framework)
runProtected:124, TestResult (junit.framework)
run:109, TestResult (junit.framework)
run:118, TestCase (junit.framework)
doRun:116, TestRunner (junit.textui)
doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
doRun:109, TestRunner (junit.textui)
startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

ByteBuf to ByteBuffer conversion

public ByteBuffer[] nioBuffers(int maxCount, long maxBytes) {
    ...
    ByteBuffer[] nioBuffers = NIO_BUFFERS.get(threadLocalMap);
    Entry entry = flushedEntry;
    while (isFlushedEntry(entry) && entry.msg instanceof ByteBuf) {
        if (!entry.cancelled) {
            ByteBuf buf = (ByteBuf) entry.msg;
            final int readerIndex = buf.readerIndex();
            final int readableBytes = buf.writerIndex() - readerIndex;

            if (readableBytes > 0) {
                nioBufferSize += readableBytes;
                int count = entry.count;
                if (count == -1) {
                    //noinspection ConstantValueVariableUse
                    entry.count = count = buf.nioBufferCount();
                }
                int neededSpace = min(maxCount, nioBufferCount + count);
                if (neededSpace > nioBuffers.length) {
                    nioBuffers = expandNioBufferArray(nioBuffers, neededSpace, nioBufferCount);
                    NIO_BUFFERS.set(threadLocalMap, nioBuffers);
                }
                if (count == 1) {
                    ByteBuffer nioBuf = entry.buf;
                    if (nioBuf == null) {
                        // cache ByteBuffer as it may need to create a new ByteBuffer instance if its a
                        // derived buffer
                        entry.buf = nioBuf = buf.internalNioBuffer(readerIndex, readableBytes); // conversion to ByteBuffer happens here
                    }
                    nioBuffers[nioBufferCount++] = nioBuf;
                } else {
                    // The code exists in an extra method to ensure the method is not too big to inline as this
                    // branch is not very likely to get hit very frequently.
                    nioBufferCount = nioBuffers(entry, buf, nioBuffers, nioBufferCount, maxCount); // convert multi-component buffers
                }
                if (nioBufferCount >= maxCount) {
                    break;
                }
            }
        }
        entry = entry.next;
    }
    this.nioBufferCount = nioBufferCount;
    this.nioBufferSize = nioBufferSize;

    return nioBuffers;
}

During the write, the ByteBuf is converted to a ByteBuffer; the final call is PooledByteBuf.internalNioBuffer.
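The conversion is a view over the same memory, not a copy. A minimal sketch of the public equivalent (nioBuffer is used here for illustration; internalNioBuffer returns a shared internal view that Netty reuses for the write):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class NioBufferDemo {
    public static void main(String[] args) {
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(32);
        buf.writeBytes("SET key value".getBytes(StandardCharsets.UTF_8));

        // Wrap the readable region as a java.nio.ByteBuffer without copying;
        // this is the kind of buffer that ends up in SocketChannel.write(...)
        ByteBuffer nio = buf.nioBuffer(buf.readerIndex(), buf.readableBytes());
        System.out.println("remaining = " + nio.remaining());

        buf.release();
    }
}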

internalNioBuffer:158, PooledByteBuf (io.netty.buffer)
_internalNioBuffer:194, PooledByteBuf (io.netty.buffer)
internalNioBuffer:207, PooledByteBuf (io.netty.buffer)
nioBuffers:447, ChannelOutboundBuffer (io.netty.channel)
doWrite:399, NioSocketChannel (io.netty.channel.socket.nio)
flush0:931, AbstractChannel$AbstractUnsafe (io.netty.channel)
flush0:354, AbstractNioChannel$AbstractNioUnsafe (io.netty.channel.nio)
flush:895, AbstractChannel$AbstractUnsafe (io.netty.channel)
flush:1372, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeFlush0:921, AbstractChannelHandlerContext (io.netty.channel)
invokeFlush:907, AbstractChannelHandlerContext (io.netty.channel)
flush:893, AbstractChannelHandlerContext (io.netty.channel)
flush:125, ChannelOutboundHandlerAdapter (io.netty.channel)
invokeFlush0:925, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:941, AbstractChannelHandlerContext (io.netty.channel)
write:966, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
write:83, CommandsQueue (org.redisson.client.handler)
invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
- Async stack trace
addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
write:972, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
writeAndFlush:306, AbstractChannel (io.netty.channel)
send:206, RedisConnection (org.redisson.client)
sync:215, RedisConnection (org.redisson.client)
sync:202, RedisConnection (org.redisson.client)
<init>:100, ClusterConnectionManager (org.redisson.cluster)
createConnectionManager:196, ConfigSupport (org.redisson.config)
<init>:68, Redisson (org.redisson)
create:109, Redisson (org.redisson)
main:24, App (com.demo.redission)

The connection-creation process:

createConnection:36, RedisConnectionHandler (org.redisson.client.handler)
channelRegistered:53, BaseConnectionHandler (org.redisson.client.handler)
invokeChannelRegistered:176, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRegistered:152, AbstractChannelHandlerContext (io.netty.channel) [2]
fireChannelRegistered:145, AbstractChannelHandlerContext (io.netty.channel)
channelRegistered:1383, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeChannelRegistered:172, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRegistered:152, AbstractChannelHandlerContext (io.netty.channel) [1]
fireChannelRegistered:815, DefaultChannelPipeline (io.netty.channel)
register0:517, AbstractChannel$AbstractUnsafe (io.netty.channel)
access$200:429, AbstractChannel$AbstractUnsafe (io.netty.channel)
run:486, AbstractChannel$AbstractUnsafe$1 (io.netty.channel)
runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
- Async stack trace
addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
register:483, AbstractChannel$AbstractUnsafe (io.netty.channel)
register:89, SingleThreadEventLoop (io.netty.channel)
register:83, SingleThreadEventLoop (io.netty.channel)
register:86, MultithreadEventLoopGroup (io.netty.channel)
initAndRegister:323, AbstractBootstrap (io.netty.bootstrap)
doResolveAndConnect:155, Bootstrap (io.netty.bootstrap)
connect:139, Bootstrap (io.netty.bootstrap)
lambda$connectAsync$1:220, RedisClient (org.redisson.client)
uniComposeStage:1187, CompletableFuture (java.util.concurrent)
thenCompose:2309, CompletableFuture (java.util.concurrent)
connectAsync:218, RedisClient (org.redisson.client)
connect:189, ClientConnectionsEntry (org.redisson.connection)
connect:249, ConnectionPool (org.redisson.connection.pool)
createConnection:274, ConnectionPool (org.redisson.connection.pool)
lambda$createConnection$1:112, ConnectionPool (org.redisson.connection.pool)
uniAcceptNow:757, CompletableFuture (java.util.concurrent)
uniAcceptStage:735, CompletableFuture (java.util.concurrent)
thenAccept:2182, CompletableFuture (java.util.concurrent)
createConnection:110, ConnectionPool (org.redisson.connection.pool)
initConnections:92, ConnectionPool (org.redisson.connection.pool)
add:69, ConnectionPool (org.redisson.connection.pool)
add:34, MasterConnectionPool (org.redisson.connection.pool)
lambda$setupMasterEntry$1:139, MasterSlaveEntry (org.redisson.connection)
uniComposeStage:1187, CompletableFuture (java.util.concurrent)
thenCompose:2309, CompletableFuture (java.util.concurrent)
setupMasterEntry:122, MasterSlaveEntry (org.redisson.connection)
setupMasterEntry:117, MasterSlaveEntry (org.redisson.connection)
setupMasterEntry:112, MasterSlaveEntry (org.redisson.connection)
initSingleEntry:330, MasterSlaveConnectionManager (org.redisson.connection)
<init>:146, MasterSlaveConnectionManager (org.redisson.connection)
<init>:30, SingleConnectionManager (org.redisson.connection)
createConnectionManager:190, ConfigSupport (org.redisson.config)
<init>:68, Redisson (org.redisson)
create:109, Redisson (org.redisson)
testApp:58, AppTest (com.demo.redission)
invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:577, Method (java.lang.reflect)
runTest:154, TestCase (junit.framework)
runBare:127, TestCase (junit.framework)
protect:106, TestResult$1 (junit.framework)
runProtected:124, TestResult (junit.framework)
run:109, TestResult (junit.framework)
run:118, TestCase (junit.framework)
doRun:116, TestRunner (junit.textui)
doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
doRun:109, TestRunner (junit.textui)
startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)

Bootstrap usage:

createBootstrap:122, RedisClient (org.redisson.client)
<init>:115, RedisClient (org.redisson.client)
create:77, RedisClient (org.redisson.client)
createClient:425, MasterSlaveConnectionManager (org.redisson.connection)
createClient:412, MasterSlaveConnectionManager (org.redisson.connection)
setupMasterEntry:116, MasterSlaveEntry (org.redisson.connection)
setupMasterEntry:112, MasterSlaveEntry (org.redisson.connection)
initSingleEntry:330, MasterSlaveConnectionManager (org.redisson.connection)
<init>:146, MasterSlaveConnectionManager (org.redisson.connection)
<init>:30, SingleConnectionManager (org.redisson.connection)
createConnectionManager:190, ConfigSupport (org.redisson.config)
<init>:68, Redisson (org.redisson)
create:109, Redisson (org.redisson)
testApp:58, AppTest (com.demo.redission)
invokeVirtual:-1, LambdaForm$DMH/0x0000000800c0c400 (java.lang.invoke)
invoke:-1, LambdaForm$MH/0x0000000800c0d800 (java.lang.invoke)
invokeExact_MT:-1, Invokers$Holder (java.lang.invoke)
invokeImpl:154, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
invoke:577, Method (java.lang.reflect)
runTest:154, TestCase (junit.framework)
runBare:127, TestCase (junit.framework)
protect:106, TestResult$1 (junit.framework)
runProtected:124, TestResult (junit.framework)
run:109, TestResult (junit.framework)
run:118, TestCase (junit.framework)
runTest:208, TestSuite (junit.framework)
run:203, TestSuite (junit.framework)
doRun:116, TestRunner (junit.textui)
doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
doRun:109, TestRunner (junit.textui)
startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
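For reference, a minimal sketch of what a Netty Bootstrap set-up generally looks like (generic Netty usage, not Redisson's actual createBootstrap code; host and port are assumptions):

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class BootstrapDemo {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup group = new NioEventLoopGroup(); // the event loops whose creation triggered allocateDirect above
        try {
            Bootstrap bootstrap = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Redisson installs its encoders/decoders here (see RedisChannelInitializer below)
                        }
                    });
            ChannelFuture future = bootstrap.connect("127.0.0.1", 6379).sync();
            future.channel().close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}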

Future usage:

handleResult:547, RedisExecutor (org.redisson.command)
checkAttemptPromise:524, RedisExecutor (org.redisson.command)
lambda$execute$4:176, RedisExecutor (org.redisson.command)
accept:-1, RedisExecutor$$Lambda$85/0x0000000800d64c28 (org.redisson.command)
uniWhenComplete:863, CompletableFuture (java.util.concurrent)
tryFire:841, CompletableFuture$UniWhenComplete (java.util.concurrent)
postComplete:510, CompletableFuture (java.util.concurrent)
complete:2147, CompletableFuture (java.util.concurrent)
completeResponse:467, CommandDecoder (org.redisson.client.handler)
handleResult:461, CommandDecoder (org.redisson.client.handler)
decode:340, CommandDecoder (org.redisson.client.handler)
decodeCommand:205, CommandDecoder (org.redisson.client.handler)
decode:144, CommandDecoder (org.redisson.client.handler)
decode:120, CommandDecoder (org.redisson.client.handler)
decodeRemovalReentryProtection:529, ByteToMessageDecoder (io.netty.handler.codec)
callDecode:366, ReplayingDecoder (io.netty.handler.codec)
channelRead:290, ByteToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:444, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:412, AbstractChannelHandlerContext (io.netty.channel)
channelRead:1410, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeChannelRead:440, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:919, DefaultChannelPipeline (io.netty.channel)
read:166, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
processSelectedKey:788, NioEventLoop (io.netty.channel.nio)
processSelectedKeysOptimized:724, NioEventLoop (io.netty.channel.nio)
processSelectedKeys:650, NioEventLoop (io.netty.channel.nio)
run:562, NioEventLoop (io.netty.channel.nio)
run:997, SingleThreadEventExecutor$4 (io.netty.util.concurrent)
run:74, ThreadExecutorMap$2 (io.netty.util.internal)
run:30, FastThreadLocalRunnable (io.netty.util.concurrent)
run:833, Thread (java.lang)

Redis decoding

/home/dai/.m2/repository/org/redisson/redisson/3.19.0/redisson-3.19.0-sources.jar!/org/redisson/client/handler/RedisChannelInitializer.java
@Override
protected void initChannel(Channel ch) throws Exception {
    initSsl(config, ch);

    if (type == Type.PLAIN) {
        ch.pipeline().addLast(new RedisConnectionHandler(redisClient));
    } else {
        ch.pipeline().addLast(new RedisPubSubConnectionHandler(redisClient));
    }

    ch.pipeline().addLast(
        connectionWatchdog,
        CommandEncoder.INSTANCE,
        CommandBatchEncoder.INSTANCE);

    if (type == Type.PLAIN) {
        ch.pipeline().addLast(new CommandsQueue());
    } else {
        ch.pipeline().addLast(new CommandsQueuePubSub());
    }

    if (pingConnectionHandler != null) {
        ch.pipeline().addLast(pingConnectionHandler);
    }

    if (type == Type.PLAIN) {
        ch.pipeline().addLast(new CommandDecoder(config.getAddress().getScheme())); // decoding handler
    } else {
        ch.pipeline().addLast(new CommandPubSubDecoder(config));
    }

    ch.pipeline().addLast(new ErrorsLoggingHandler());

    config.getNettyHook().afterChannelInitialization(ch);
}


Redisson future callback:

handleResult:547, RedisExecutor (org.redisson.command)
checkAttemptPromise:524, RedisExecutor (org.redisson.command)
lambda$execute$4:176, RedisExecutor (org.redisson.command)
accept:-1, RedisExecutor$$Lambda$86/0x0000000800d64c18 (org.redisson.command)
uniWhenComplete:863, CompletableFuture (java.util.concurrent)
tryFire:841, CompletableFuture$UniWhenComplete (java.util.concurrent)
postComplete:510, CompletableFuture (java.util.concurrent)
complete:2147, CompletableFuture (java.util.concurrent)
completeResponse:467, CommandDecoder (org.redisson.client.handler)
handleResult:461, CommandDecoder (org.redisson.client.handler)
decode:392, CommandDecoder (org.redisson.client.handler)
decodeCommand:205, CommandDecoder (org.redisson.client.handler)
decode:144, CommandDecoder (org.redisson.client.handler)
decode:120, CommandDecoder (org.redisson.client.handler)
decodeRemovalReentryProtection:529, ByteToMessageDecoder (io.netty.handler.codec)
callDecode:366, ReplayingDecoder (io.netty.handler.codec)
channelRead:290, ByteToMessageDecoder (io.netty.handler.codec)
invokeChannelRead:444, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:412, AbstractChannelHandlerContext (io.netty.channel)
channelRead:1410, DefaultChannelPipeline$HeadContext (io.netty.channel)
invokeChannelRead:440, AbstractChannelHandlerContext (io.netty.channel)
invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
fireChannelRead:919, DefaultChannelPipeline (io.netty.channel)
read:166, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
processSelectedKey:788, NioEventLoop (io.netty.channel.nio)
processSelectedKeysOptimized:724, NioEventLoop (io.netty.channel.nio)
processSelectedKeys:650, NioEventLoop (io.netty.channel.nio)
run:562, NioEventLoop (io.netty.channel.nio)
run:997, SingleThreadEventExecutor$4 (io.netty.util.concurrent)
run:74, ThreadExecutorMap$2 (io.netty.util.internal)
run:30, FastThreadLocalRunnable (io.netty.util.concurrent)
run:833, Thread (java.lang)

Related reading


Troubleshooting a Java GC issue


Background

In the test environment the old generation was using a lot of memory, so it needed to be investigated.


Process

How the problem appeared

While a colleague was load-testing, they noticed the old generation was occupying a very large amount of memory.
An example is shown in the figure below:
java_gc_generation


Relevant configuration:

  • JDK 11
  • maximum heap size: 2 GB
  • GC algorithm: the default G1

Investigating the problem

Dump file

Use the jmap command to dump the Java heap:

jmap -dump:format=b,live,file=<file-path> <pid>


Analysis with the MAT tool

Then download the MAT tool (the original links to a download page).
After installing it, open the dump file and click Dominator Tree:
Dominator Tree


There you can see the objects and their sizes.


Dominator Tree detail


We can see a HashSet of roughly 900 MB. It is a de-duplication set: device IDs are added to it on every iteration. When the loop finishes it is no longer referenced and will eventually be reclaimed by the GC.


Conclusion

This is not a memory leak; it is a large set holding roughly 10 million entries, in other words a single big object.


We asked ops to increase the maximum heap size, and the problem was resolved.


Follow-up optimization: instead of stuffing strings into a HashSet for de-duplication, we plan to use a Bloom filter; a sketch is shown below.
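A rough sketch of what that could look like with Guava's BloomFilter (Guava is just one possible implementation; the expected-insertion count and error rate below are assumptions):

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;

public class DedupSketch {
    public static void main(String[] args) {
        // ~10 million expected device ids with a 1% false-positive rate uses a few MB of bits,
        // instead of a ~900 MB HashSet of strings
        BloomFilter<String> seen = BloomFilter.create(
                Funnels.stringFunnel(StandardCharsets.UTF_8), 10_000_000, 0.01);

        String deviceId = "device-42"; // hypothetical id
        if (!seen.mightContain(deviceId)) {
            seen.put(deviceId);
            // first time we see this device id, so process it
        }
        // Trade-off: a small fraction of ids may be wrongly reported as already seen
    }
}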


Java object sizes


Background

While investigating a GC problem, I realized I needed to understand how large Java objects actually are.


For example, how many bytes does an object created like this, with 1099 elements, occupy?


And what if it is a specific class, NumClass?

public class NumClass{
    public int num1 ;
    public int num2 ;
}


Then how many bytes does a NumClass[] created that way occupy?

// How many bytes does this array occupy?
Object[] array = new Object[10245] ;
// And how many bytes does this array occupy?
Object[] array = new NumClass[10245] ;

The conclusion is that both occupy the same number of bytes, because an object array stores only references (oops), not the objects themselves.
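This is easy to check with JOL (discussed below); a minimal sketch, assuming jol-core is on the classpath:

import org.openjdk.jol.info.ClassLayout;

public class ArraySizeDemo {
    public static void main(String[] args) {
        // Both arrays hold 10245 references (oops); the element type does not change the array's own size
        System.out.println(ClassLayout.parseInstance(new Object[10245]).toPrintable());
        System.out.println(ClassLayout.parseInstance(new NumClass[10245]).toPrintable());
    }
}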


Object array example

Start with the simplest new Object[] code. In this example an object array of length 10245 is created:

package com;
public class Hello{
    public static volatile Object[] arr ;
    public static void main(String [] argv){
        arr = new Object[10245]; // create an object array; the array itself is also an object, so how big is it?
        arr[1] = "hihi";

    }
}

In fact, on a 64-bit machine without pointer compression, the size is 8 + 4 + 10245*8 bytes.


oop is an alias for the pointer type oopDesc*:

typedef class oopDesc*          oop;
typedef class instanceOopDesc*  instanceOop;
typedef class arrayOopDesc*     arrayOop;
typedef class objArrayOopDesc*  objArrayOop;
typedef class typeArrayOopDesc* typeArrayOop;
+-----------++----------++-------++------+
|           ||          ||       ||      |
|           ||          ||       ||      |
| MarkWord  ||  length  ||  oop  ||  oop |
+-----------++----------++-------++------+   oop repeated 10245 times

jol

Core function:

jol\jol-core\src\main\java\org\openjdk\jol\layouters\CurrentLayouter.java

@Override
public ClassLayout layout(ClassData data) {
    VirtualMachine vm = VM.current();

    if (data.isArray()) {
        // special case of arrays
        int base  = vm.arrayBaseOffset(data.arrayComponentType());
        int scale = vm.arrayIndexScale(data.arrayComponentType());

        long instanceSize = MathUtil.align(base + data.arrayLength() * scale, vm.objectAlignment());

        SortedSet<FieldLayout> result = new TreeSet<>();
        result.add(new FieldLayout(FieldData.create(data.arrayClass(), "<elements>", data.arrayComponentType()), base, scale * data.arrayLength()));
        return ClassLayout.create(data, result, CURRENT, instanceSize, false);
    }

    Collection<FieldData> fields = data.fields();

    SortedSet<FieldLayout> result = new TreeSet<>();
    for (FieldData f : fields) {
        result.add(new FieldLayout(f, vm.fieldOffset(f.refField()), vm.sizeOfField(f.typeClass())));
    }

    long instanceSize;
    if (result.isEmpty()) {
        instanceSize = vm.objectHeaderSize();
    } else {
        FieldLayout f = result.last();
        instanceSize = f.offset() + f.size();
        // TODO: This calculation is incorrect if there is a trailing @Contended field, or the instance is @Contended
    }
    instanceSize = MathUtil.align(instanceSize, vm.objectAlignment());
    return ClassLayout.create(data, result, CURRENT, instanceSize, true);
}
static int length_offset_in_bytes() {
    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
                                        sizeof(arrayOopDesc);
}

Creating an array

(gdb) bt
#0 arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
#1 0x00007f93c2fdc06e in arrayOopDesc::length_addr_impl (obj_ptr=0x7162010c0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:67
#2 0x00007f93c2fdc0ba in arrayOopDesc::length (this=0x7162010c0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:114
#3 0x00007f93c3239521 in arrayOopDesc::is_within_bounds (this=0x7162010c0, index=10) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:110
#4 0x00007f93c3239690 in typeArrayOopDesc::byte_at_addr (this=0x7162010c0, which=10) at /var/jdk/src/hotspot/share/oops/typeArrayOop.inline.hpp:48
#5 0x00007f93c323972d in typeArrayOopDesc::byte_at_put (this=0x7162010c0, which=10, contents=112 'p') at /var/jdk/src/hotspot/share/oops/typeArrayOop.inline.hpp:96
#6 0x00007f93c389e259 in java_lang_String::create_from_unicode (unicode=0x7f93bc037c88, length=33, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:300
#7 0x00007f93c3f7b91d in StringTable::do_intern (string_or_null_h=..., name=0x7f93bc037c88, len=33, hash=2694772907, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:347
#8 0x00007f93c3f7b87e in StringTable::intern (string_or_null_h=..., name=0x7f93bc037c88, len=33, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:336
#9 0x00007f93c3f7b5b0 in StringTable::intern (symbol=0x7f93c0018d38, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:296
#10 0x00007f93c35740b5 in ConstantPool::uncached_string_at (this=0x7f938c02e0a0, which=250, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/oops/constantPool.cpp:1171
#11 0x00007f93c36a19cd in fieldDescriptor::string_initial_value (this=0x7f93c2bd3a68, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/runtime/fieldDescriptor.cpp:103
#12 0x00007f93c389ff2d in initialize_static_string_field (fd=0x7f93c2bd3a68, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:809
#13 0x00007f93c38a0444 in initialize_static_field (fd=0x7f93c2bd3a68, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:866
#14 0x00007f93c3869f51 in InstanceKlass::do_local_static_fields (this=0x8000431a0, f=0x7f93c38a0365 <initialize_static_field(fieldDescriptor*, Handle, JavaThread*)>, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1672
#15 0x00007f93c38a08d5 in java_lang_Class::initialize_mirror_fields (k=0x8000431a0, mirror=..., protection_domain=..., classData=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:930
#16 0x00007f93c38a10d1 in java_lang_Class::create_mirror (k=0x8000431a0, class_loader=..., module=..., protection_domain=..., classData=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:1035
#17 0x00007f93c34c6144 in ClassFileParser::fill_instance_klass (this=0x7f93c2bd3dd0, ik=0x8000431a0, changed_by_loadhook=false, cl_inst_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classFileParser.cpp:5426
#18 0x00007f93c34c532e in ClassFileParser::create_instance_klass (this=0x7f93c2bd3dd0, changed_by_loadhook=false, cl_inst_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classFileParser.cpp:5255
#19 0x00007f93c3b554d1 in KlassFactory::create_from_stream (stream=0x7f93bc036fb0, name=0x7f93c00001f8, loader_data=0x7f93bc121170, cl_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/klassFactory.cpp:202
#20 0x00007f93c34d86e9 in ClassLoader::load_class (name=0x7f93c00001f8, search_append_only=false, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classLoader.cpp:1231
#21 0x00007f93c4000806 in SystemDictionary::load_instance_class_impl (class_name=0x7f93c00001f8, class_loader=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1289
#22 0x00007f93c4000ba3 in SystemDictionary::load_instance_class (name_hash=2036240099, name=0x7f93c00001f8, class_loader=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1354
#23 0x00007f93c3ffeca9 in SystemDictionary::resolve_instance_class_or_null (name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:723
#24 0x00007f93c3ffd82e in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:294
#25 0x00007f93c3ffd6d4 in SystemDictionary::resolve_or_null (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:277
#26 0x00007f93c3ffd617 in SystemDictionary::resolve_or_fail (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:263
#27 0x00007f93c32b8d98 in SystemDictionary::resolve_or_fail (class_name=0x7f93c00001f8, throw_error=true, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.hpp:100
#28 0x00007f93c40dca98 in vmClasses::resolve (id=vmClassID::Throwable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:99
#29 0x00007f93c40dcb96 in vmClasses::resolve_until (limit_id=vmClassID::SoftReference_klass_knum, start_id=@0x7f93c2bd48f0: vmClassID::Cloneable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:108
#30 0x00007f93c40dd59a in vmClasses::resolve_through (last_id=vmClassID::Reference_klass_knum, start_id=@0x7f93c2bd48f0: vmClassID::Cloneable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.hpp:64
#31 0x00007f93c40dce23 in vmClasses::resolve_all (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:168
#32 0x00007f93c4001ab2 in SystemDictionary::initialize (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1655
#33 0x00007f93c40812fb in Universe::genesis (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/memory/universe.cpp:335
#34 0x00007f93c408378f in universe2_init () at /var/jdk/src/hotspot/share/memory/universe.cpp:937
#35 0x00007f93c3863d8a in init_globals () at /var/jdk/src/hotspot/share/runtime/init.cpp:132
#36 0x00007f93c404a1ca in Threads::create_vm (args=0x7f93c2bd4d50, canTryAgain=0x7f93c2bd4c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2843
#37 0x00007f93c396f43b in JNI_CreateJavaVM_inner (vm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, args=0x7f93c2bd4d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
#38 0x00007f93c396f787 in JNI_CreateJavaVM (vm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, args=0x7f93c2bd4d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
#39 0x00007f93c50e6a6a in InitializeJVM (pvm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, ifn=0x7f93c2bd4e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
#40 0x00007f93c50e35ec in JavaMain (_args=0x7ffedd44e1a0) at /var/jdk/src/java.base/share/native/libjli/java.c:411
#41 0x00007f93c50ea5ec in ThreadJavaMain (args=0x7ffedd44e1a0) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
#42 0x00007f93c4f45b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#43 0x00007f93c4fd6bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100

Computing the array size:

static int array_size(int length) {
    const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
    assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
           "Else the following (new) computation would be in error");
    uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
#ifdef ASSERT
    // The old code is left in for sanity-checking; it'll
    // go away pretty soon. XXX
    // Without UseCompressedOops, this is simply:
    //   oop->length() * HeapWordsPerOop;
    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
    // The oop elements are aligned up to wordSize
    const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
    uint old_res;
    if (HeapWordsPerOop > 0) {
        old_res = length * HeapWordsPerOop;
    } else {
        old_res = align_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
    }
    assert(res == old_res, "Inconsistency between old and new.");
#endif // ASSERT
    return res;
}
Thread 2 "java" hit Breakpoint 5, jni_invoke_static (env=0x7fca48029310, result=0x7fca4c534bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7fca48542d50, args=0x7fca4c534c60, __the_thread__=0x7fca48029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
881 args->push_arguments_on(&java_args);
(gdb) p method._value->print()
{method}
- this oop: 0x00007fca14411240
- method holder: 'com/Hello'
- constants: 0x00007fca14411020 constant pool [20] {0x00007fca14411020} for 'com/Hello' cache=0x00007fca14411348
- access: 0x9 public static
- name: 'main'
- signature: '([Ljava/lang/String;)V'
- max stack: 2
- max locals: 1
- size of params: 1
- method size: 13
- vtable index: -2
- i2i entry: 0x00007fca3900dc00
- adapters: AHE@0x00007fca4812b8d0: 0xb i2c: 0x00007fca39114d60 c2i: 0x00007fca39114e1a c2iUV: 0x00007fca39114de4 c2iNCI: 0x00007fca39114e57
- compiled entry 0x00007fca39114e1a
- code size: 10
- code start: 0x00007fca14411230
- code end (excl): 0x00007fca1441123a
- checked ex length: 0
- linenumber start: 0x00007fca1441123a
- localvar length: 0
$17 = void
(gdb) bt
#0 jni_invoke_static (env=0x7fca48029310, result=0x7fca4c534bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7fca48542d50, args=0x7fca4c534c60, __the_thread__=0x7fca48029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
#1 0x00007fca4d2c141c in jni_CallStaticVoidMethod (env=0x7fca48029310, cls=0x7fca4802b368, methodID=0x7fca48542d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:1710
#2 0x00007fca4ea4415e in JavaMain (_args=0x7fff1ad56b60) at /var/jdk/src/java.base/share/native/libjli/java.c:545
#3 0x00007fca4ea4a5ec in ThreadJavaMain (args=0x7fff1ad56b60) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
#4 0x00007fca4e8a5b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#5 0x00007fca4e936bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
Thread 2 "java" hit Breakpoint 5, jni_invoke_static (env=0x7f8674029310, result=0x7f86791b0bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7f8674652560, args=0x7f86791b0c60, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
881 args->push_arguments_on(&java_args);
(gdb) p method._value->print()
{method}
- this oop: 0x00007f8644411240
- method holder: 'com/Hello'
- constants: 0x00007f8644411020 constant pool [20] {0x00007f8644411020} for 'com/Hello' cache=0x00007f8644411348
- access: 0x9 public static
- name: 'main'
- signature: '([Ljava/lang/String;)V'
- max stack: 2
- max locals: 1
- size of params: 1
- method size: 13
- vtable index: -2
- i2i entry: 0x00007f866500dc00
- adapters: AHE@0x00007f867412b8d0: 0xb i2c: 0x00007f8665114d60 c2i: 0x00007f8665114e1a c2iUV: 0x00007f8665114de4 c2iNCI: 0x00007f8665114e57
- compiled entry 0x00007f8665114e1a
- code size: 10
- code start: 0x00007f8644411230
- code end (excl): 0x00007f864441123a
- checked ex length: 0
- linenumber start: 0x00007f864441123a
- localvar length: 0
$23 = void
(gdb) enable 1
(gdb) c
Continuing.

Thread 2 "java" hit Breakpoint 1, oopFactory::new_objArray (klass=0x800040f80, length=2019, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/memory/oopFactory.cpp:118
118 assert(klass->is_klass(), "must be instance class");
(gdb) bt
#0 oopFactory::new_objArray (klass=0x800040f80, length=2019, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/memory/oopFactory.cpp:118
#1 0x00007f8679e68a5b in InterpreterRuntime::anewarray (current=0x7f8674029030, pool=0x7f8644411020, index=2, size=2019) at /var/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:254
#2 0x00007f8665024083 in ?? ()
#3 0x00007f867b4520a0 in TemplateInterpreter::_active_table () from /var/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#4 0x00007f8665024002 in ?? ()
#5 0x00007f86791b07b0 in ?? ()
#6 0x00007f8644411233 in ?? ()
#7 0x00007f86791b0808 in ?? ()
#8 0x00007f8644411348 in ?? ()
#9 0x0000000000000000 in ?? ()
+ + + +
Bottom (innermost) frame selected; you cannot go down.
(gdb) p _do_zero
$35 = true
(gdb) n
413 if (_do_zero) {
(gdb) n
414 mem_clear(mem);
(gdb) n
416 arrayOopDesc::set_length(mem, _length);
(gdb) l
411 // concurrent GC.
412 assert(_length >= 0, "length should be non-negative");
413 if (_do_zero) {
414 mem_clear(mem);
415 }
416 arrayOopDesc::set_length(mem, _length);
417 return finish(mem);
418 }
419
420 oop ClassAllocator::initialize(HeapWord* mem) const {
(gdb) up
#1 0x00007f867a27af4b in MemAllocator::allocate (this=0x7f86791b0650) at /var/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:365
365 obj = initialize(mem);
(gdb) down
#0 ObjArrayAllocator::initialize (this=0x7f86791b0650, mem=0x715e73dd0) at /var/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:416
416 arrayOopDesc::set_length(mem, _length);
(gdb) s
arrayOopDesc::set_length (mem=0x715e73dd0, length=2019) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:122
122 *length_addr_impl(mem) = length;
(gdb) s
arrayOopDesc::length_addr_impl (obj_ptr=0x715e73dd0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:66
66 char* ptr = static_cast<char*>(obj_ptr);
(gdb) l
61 return (int)hs;
62 }
63
64 // Returns the address of the length "field". See length_offset_in_bytes().
65 static int* length_addr_impl(void* obj_ptr) {
66 char* ptr = static_cast<char*>(obj_ptr);
67 return reinterpret_cast<int*>(ptr + length_offset_in_bytes());
68 }
69
70 // Check whether an element of a typeArrayOop with the given type must be
(gdb) n
67 return reinterpret_cast<int*>(ptr + length_offset_in_bytes());
(gdb) s
arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
83 sizeof(arrayOopDesc);
(gdb) l
78 // The _length field is not declared in C++. It is allocated after the
79 // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
80 // it occupies the second half of the _klass field in oopDesc.
81 static int length_offset_in_bytes() {
82 return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
83 sizeof(arrayOopDesc);
84 }
85
86 // Returns the offset of the first element.
87 static int base_offset_in_bytes(BasicType type) {
(gdb) p Use
Display all 161 possibilities? (y or n)
(gdb) p UseCompressedClassPointers
$36 = true
(gdb) s
82 return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
(gdb) s
oopDesc::klass_gap_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/oop.hpp:307
307 assert(has_klass_gap(), "only applicable to compressed klass pointers");
(gdb) l
302
303 // for code generation
304 static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); }
305 static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); }
306 static int klass_gap_offset_in_bytes() {
307 assert(has_klass_gap(), "only applicable to compressed klass pointers");
308 return klass_offset_in_bytes() + sizeof(narrowKlass);
309 }
310
311 // for error reporting
(gdb) n
oopDesc::klass_gap_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/oop.hpp:308
308 return klass_offset_in_bytes() + sizeof(narrowKlass);
(gdb) n
309 }
(gdb) n
arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
83 sizeof(arrayOopDesc);
(gdb) n
84 }
(gdb) n
arrayOopDesc::length_addr_impl (obj_ptr=0x715e73dd0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:68
68 }
(gdb) n
arrayOopDesc::set_length (mem=0x715e73dd0, length=2019) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:122
122 *length_addr_impl(mem) = length;
(gdb) n
123 }
(gdb) p (int *)mem@20
Only values in memory can be extended with '@'.
(gdb) p *(int *)mem@20
$37 = {-1163019586, -1163019586, -1163019586, 2019, 0 <repeats 16 times>}

Bytecode generation entry point

// jdk/src/hotspot/share/interpreter/templateInterpreter.cpp
void DispatchTable::set_entry(int i, EntryPoint& entry) {
    assert(0 <= i && i < length, "index out of bounds");
    assert(number_of_states == 10, "check the code below");
    _table[btos][i] = entry.entry(btos);
    _table[ztos][i] = entry.entry(ztos);
    _table[ctos][i] = entry.entry(ctos);
    _table[stos][i] = entry.entry(stos);
    _table[atos][i] = entry.entry(atos);
    _table[itos][i] = entry.entry(itos);
    _table[ltos][i] = entry.entry(ltos);
    _table[ftos][i] = entry.entry(ftos);
    _table[dtos][i] = entry.entry(dtos);
    _table[vtos][i] = entry.entry(vtos);
}
//src/hotspot/share/interpreter/bytecodes.hpp
_new         = 187, // 0xbb
_newarray    = 188, // 0xbc
_anewarray   = 189, // 0xbd
_arraylength = 190, // 0xbe

Stack:

Thread 2 "java" hit Breakpoint 13, 0x00007fffe1011b13 in ?? ()
(gdb) x/20i $pc
=> 0x7fffe1011b13: movzwl 0x1(%r13),%eax
0x7fffe1011b18: bswap %eax
0x7fffe1011b1a: sar $0x10,%eax
0x7fffe1011b1d: movzbl 0x3(%r13),%ebx
0x7fffe1011b22: add $0x3,%r13
0x7fffe1011b26: movabs $0x7ffff7bca0a0,%r10
0x7fffe1011b30: jmp *(%r10,%rbx,8)
0x7fffe1011b34: nop
0x7fffe1011b35: nop
0x7fffe1011b36: nop
0x7fffe1011b37: nop
0x7fffe1011b38: int3
0x7fffe1011b39: int3
0x7fffe1011b3a: int3
0x7fffe1011b3b: int3
0x7fffe1011b3c: int3
0x7fffe1011b3d: int3
0x7fffe1011b3e: int3
0x7fffe1011b3f: int3
0x7fffe1011b40: and %al,(%rax,%rax,1)

0x00007fffe1011b1d in ?? ()
(gdb) info registers
rax 0x2805 10245
rbx 0x11 17
rcx 0x2 2
rdx 0x8 8
rsi 0x555555581230 93824992416304
rdi 0x7ffff0028f70 140737220087664
rbp 0x7ffff59fe7f8 0x7ffff59fe7f8
rsp 0x7ffff59fe7b0 0x7ffff59fe7b0
r8 0x8 8
r9 0x0 0
r10 0x7ffff7bcc8a0 140737349732512
r11 0x216 534
r12 0x0 0
r13 0x7fffb4411230 140736217551408
r14 0x7ffff59fe808 140737314285576
r15 0x7ffff0028f70 140737220087664
rip 0x7fffe1011b1d 0x7fffe1011b1d
eflags 0x216 [ PF AF IF ]
cs 0x33 51
ss 0x2b 43
ds 0x0 0
es 0x0 0
fs 0x0 0
gs 0x0 0


anewarray assembly code:

(gdb) x/20i $pc
=> 0x7fffe102400a: lea 0x8(%rsp),%rax
0x7fffe102400f: mov %r13,-0x40(%rbp)
0x7fffe1024013: cmpq $0x0,-0x10(%rbp)
0x7fffe102401b: je 0x7fffe1024035
0x7fffe1024021: movabs $0x7ffff71becc8,%rdi
0x7fffe102402b: and $0xfffffffffffffff0,%rsp
0x7fffe102402f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1024034: hlt
0x7fffe1024035: push %r10
0x7fffe1024037: cmp 0x16ae2ec2(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
0x7fffe102403e: je 0x7fffe1024058
0x7fffe1024044: movabs $0x7ffff7311c28,%rdi
0x7fffe102404e: and $0xfffffffffffffff0,%rsp
0x7fffe1024052: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1024057: hlt
0x7fffe1024058: pop %r10
0x7fffe102405a: mov %r15,%rdi
0x7fffe102405d: vzeroupper
0x7fffe1024060: mov %rbp,0x2d0(%r15)
0x7fffe1024067: mov %rax,0x2c0(%r15)
(gdb) x/200i $pc
=> 0x7fffe102400a: lea 0x8(%rsp),%rax
0x7fffe102400f: mov %r13,-0x40(%rbp)
0x7fffe1024013: cmpq $0x0,-0x10(%rbp)
0x7fffe102401b: je 0x7fffe1024035
0x7fffe1024021: movabs $0x7ffff71becc8,%rdi
0x7fffe102402b: and $0xfffffffffffffff0,%rsp
0x7fffe102402f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1024034: hlt
0x7fffe1024035: push %r10
0x7fffe1024037: cmp 0x16ae2ec2(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
0x7fffe102403e: je 0x7fffe1024058
0x7fffe1024044: movabs $0x7ffff7311c28,%rdi
0x7fffe102404e: and $0xfffffffffffffff0,%rsp
0x7fffe1024052: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1024057: hlt
0x7fffe1024058: pop %r10
0x7fffe102405a: mov %r15,%rdi
0x7fffe102405d: vzeroupper
0x7fffe1024060: mov %rbp,0x2d0(%r15)
0x7fffe1024067: mov %rax,0x2c0(%r15)
0x7fffe102406e: test $0xf,%esp
0x7fffe1024074: je 0x7fffe102408c
0x7fffe102407a: sub $0x8,%rsp
0x7fffe102407e: call 0x7ffff65cf968 <_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP12ConstantPoolii>
0x7fffe1024083: add $0x8,%rsp
0x7fffe1024087: jmp 0x7fffe1024091
0x7fffe102408c: call 0x7ffff65cf968 <_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP12ConstantPoolii>
0x7fffe1024091: push %rax
0x7fffe1024092: push %rdi
0x7fffe1024093: push %rsi
0x7fffe1024094: push %rdx
--Type <RET> for more, q to quit, c to continue without paging--
0x7fffe1024095: push %rcx
0x7fffe1024096: push %r8
0x7fffe1024098: push %r9
0x7fffe102409a: push %r10
0x7fffe102409c: push %r11
0x7fffe102409e: test $0xf,%esp
0x7fffe10240a4: je 0x7fffe10240bc
0x7fffe10240aa: sub $0x8,%rsp
0x7fffe10240ae: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe10240b3: add $0x8,%rsp
0x7fffe10240b7: jmp 0x7fffe10240c1
0x7fffe10240bc: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe10240c1: pop %r11
0x7fffe10240c3: pop %r10
0x7fffe10240c5: pop %r9
0x7fffe10240c7: pop %r8
0x7fffe10240c9: pop %rcx
0x7fffe10240ca: pop %rdx
0x7fffe10240cb: pop %rsi
0x7fffe10240cc: pop %rdi
0x7fffe10240cd: cmp %rax,%r15
0x7fffe10240d0: je 0x7fffe10240ea
0x7fffe10240d6: movabs $0x7ffff7311da0,%rdi
0x7fffe10240e0: and $0xfffffffffffffff0,%rsp
0x7fffe10240e4: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe10240e9: hlt
0x7fffe10240ea: pop %rax
0x7fffe10240eb: movq $0x0,0x2c0(%r15)
0x7fffe10240f6: movq $0x0,0x2d0(%r15)
0x7fffe1024101: movq $0x0,0x2c8(%r15)
0x7fffe102410c: vzeroupper
--Type <RET> for more, q to quit, c to continue without paging--
0x7fffe102410f: cmpq $0x0,0x8(%r15)
0x7fffe1024117: je 0x7fffe1024122
0x7fffe102411d: jmp 0x7fffe1000c20
0x7fffe1024122: mov 0x318(%r15),%rax
0x7fffe1024129: movq $0x0,0x318(%r15)
0x7fffe1024134: mov -0x40(%rbp),%r13
0x7fffe1024138: mov -0x38(%rbp),%r14
0x7fffe102413c: ret
0x7fffe102413d: movzbl 0x3(%r13),%ebx
0x7fffe1024142: add $0x3,%r13
0x7fffe1024146: movabs $0x7ffff7bcc0a0,%r10
0x7fffe1024150: jmp *(%r10,%rbx,8)
0x7fffe1024154: nop
0x7fffe1024155: nop



Memory allocation

I am on JDK 11+, and this JDK uses G1 for garbage collection, so let's look at how G1 allocates the memory.

(gdb) bt
#0 HeapRegion::par_allocate_impl (this=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020, actual_size=0x7ffff59fcba8)
at /home/dai/jdk/src/hotspot/share/gc/g1/heapRegion.inline.hpp:63
#1 0x00007ffff640bdcc in HeapRegion::par_allocate (this=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
at /home/dai/jdk/src/hotspot/share/gc/g1/heapRegion.inline.hpp:225
#2 0x00007ffff640bfcb in G1AllocRegion::par_allocate (this=0x7ffff0052e10, alloc_region=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020,
actual_word_size=0x7ffff59fcba8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp:63
#3 0x00007ffff640c0c6 in G1AllocRegion::attempt_allocation (this=0x7ffff0052e10, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
at /home/dai/jdk/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp:77
#4 0x00007ffff6447142 in G1Allocator::attempt_allocation (this=0x7ffff0052d50, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
at /home/dai/jdk/src/hotspot/share/gc/g1/g1Allocator.inline.hpp:62
#5 0x00007ffff6447b1d in G1CollectedHeap::attempt_allocation (this=0x7ffff0048bf0, min_word_size=256, desired_word_size=63020,
actual_word_size=0x7ffff59fcba8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1CollectedHeap.cpp:709
#6 0x00007ffff64385ea in G1CollectedHeap::allocate_new_tlab (this=0x7ffff0048bf0, min_size=256, requested_size=63020, actual_size=0x7ffff59fcba8)
at /home/dai/jdk/src/hotspot/share/gc/g1/g1CollectedHeap.cpp:359
#7 0x00007ffff69e1cf6 in MemAllocator::allocate_inside_tlab_slow (this=0x7ffff59fcc00, allocation=...)
at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:318
#8 0x00007ffff69e1bc2 in MemAllocator::allocate_inside_tlab (this=0x7ffff59fcc00, allocation=...)
at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:278
#9 0x00007ffff69e1eb9 in MemAllocator::mem_allocate (this=0x7ffff59fcc00, allocation=...) at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:350
#10 0x00007ffff69e1f22 in MemAllocator::allocate (this=0x7ffff59fcc00) at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:363
#11 0x00007ffff6260d84 in CollectedHeap::array_allocate (this=0x7ffff0048bf0, klass=0x8000407c0, size=106, length=825, do_zero=true,
__the_thread__=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/gc/shared/collectedHeap.inline.hpp:41
#12 0x00007ffff6db9bf2 in TypeArrayKlass::allocate_common (this=0x8000407c0, length=825, do_zero=true, __the_thread__=0x7ffff0028f70)
at /home/dai/jdk/src/hotspot/share/oops/typeArrayKlass.cpp:93
#13 0x00007ffff62f7428 in TypeArrayKlass::allocate (this=0x8000407c0, length=825, __the_thread__=0x7ffff0028f70)
at /home/dai/jdk/src/hotspot/share/oops/typeArrayKlass.hpp:68
#14 0x00007ffff6ab4757 in oopFactory::new_typeArray (type=T_BYTE, length=825, __the_thread__=0x7ffff0028f70)
at /home/dai/jdk/src/hotspot/share/memory/oopFactory.cpp:93
#15 0x00007ffff65cf8e5 in InterpreterRuntime::newarray (current=0x7ffff0028f70, type=T_BYTE, size=825)
at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:247
#16 0x00007fffe1023eb2 in ?? ()
#17 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#18 0x00007fffe1023e31 in ?? ()
#19 0x000000062a47ab38 in ?? ()
#20 0x00007ffff59fcd88 in ?? ()
#21 0x00007fffb43a23e6 in ?? ()
#22 0x00007ffff59fcde8 in ?? ()
#23 0x00007fffb43a3520 in ?? ()
#24 0x0000000000000000 in ?? ()


Working backwards from array copy

In Java, System.arraycopy copies an entire array into a new array, and the copy is shallow.
It ultimately enters the C++ code below, so this function is a good way to understand the memory layout of arrays (a small Java example of the shallow copy follows the excerpt).

void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
                               int dst_pos, int length, TRAPS) {


  ...
  size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos);   // <--- source offset
  size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos);   // <--- destination offset
  assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
         objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
  assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
         objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
  do_copy(s, src_offset, d, dst_offset, length, CHECK);
}

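A small, self-contained example of the shallow-copy behaviour described above (illustrative only):

import java.util.Arrays;

public class ArrayCopyDemo {
    public static void main(String[] args) {
        StringBuilder[] src = { new StringBuilder("a"), new StringBuilder("b") };
        StringBuilder[] dst = new StringBuilder[2];

        // Copies the references (oops) only, not the objects they point to
        System.arraycopy(src, 0, dst, 0, src.length);

        src[0].append("!");                    // visible through dst[0] as well
        System.out.println(dst[0]);            // prints "a!"
        System.out.println(src[0] == dst[0]);  // true: same object
        System.out.println(Arrays.toString(dst));
    }
}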

So the memory layout of a Java object array is as shown in the diagram above, where each oop is a pointer.


On 64-bit, each is 8 bytes.


Related reading


clickhouse 400 error


Background

In production, requests to ClickHouse were failing with 400 Bad Request.


Investigation

The production ClickHouse exposes its HTTP port.


The Java client in production threw a 400 exception:

	ru.yandex.clickhouse.except.ClickHouseException: ClickHouse exception, code: 400, host: internal-LB-clickhouse-write-intranet-429192233.us-west-2.elb.amazonaws.com, port: 8123; HTTP/1.1 400 Bad Request
at ru.yandex.clickhouse.except.ClickHouseExceptionSpecifier.specify(ClickHouseExceptionSpecifier.java:60)
at ru.yandex.clickhouse.except.ClickHouseExceptionSpecifier.specify(ClickHouseExceptionSpecifier.java:26)
at ru.yandex.clickhouse.ClickHouseStatementImpl.checkForErrorAndThrow(ClickHouseStatementImpl.java:1080)
at ru.yandex.clickhouse.ClickHouseStatementImpl.getInputStream(ClickHouseStatementImpl.java:770)
at ru.yandex.clickhouse.ClickHouseStatementImpl.getLastInputStream(ClickHouseStatementImpl.java:693)
Caused by: java.lang.IllegalStateException: HTTP/1.1 400 Bad Request
at ru.yandex.clickhouse.ClickHouseStatementImpl.checkForErrorAndThrow(ClickHouseStatementImpl.java:1080)
at ru.yandex.clickhouse.ClickHouseStatementImpl.getInputStream(ClickHouseStatementImpl.java:770)
at ru.yandex.clickhouse.ClickHouseStatementImpl.getLastInputStream(ClickHouseStatementImpl.java:693)
at ru.yandex.clickhouse.ClickHouseStatementImpl.executeQuery(ClickHouseStatementImpl.java:341)
at ru.yandex.clickhouse.ClickHouseStatementImpl.executeQuery(ClickHouseStatementImpl.java:326)

Opening the ClickHouse log, we found the following error:

2023.01.12 09:24:26.187479 [ 205027 ] {65179d49-ea6e-4a15-b13b-16d8378cfe29} <Error> executeQuery: Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 72 ('Client'): Client as DEFAULT_VALUE, Application name as DESCRIPTION union all select CustomHttpHeaders as NAME, toInt32(0) as MAX_LEN,  as DEFAULT_VALUE, Custom HTTP heade. Expected one of: UNION, LIMIT, WHERE, WINDOW, end of query, HAVING, GROUP BY, INTO OUTFILE, OFFSET, PREWHERE, Comma, ORDER BY, SETTINGS, FROM, FORMAT, WITH, token (version 21.3.3.14 (official build)) (from 172.31.42.57:23878) (in query: select ApplicationName as NAME, toInt32(0) as MAX_LEN, ClickHouse Java Client as DEFAULT_VALUE, Application name as DESCRIPTION union all select CustomHttpHeaders as NAME, toInt32(0) as MAX_LEN, as DEFAULT_VALUE, Custom HTTP headers as DESCRIPTION union all select CustomHttpParameters as NAME, toInt32(0) as MAX_LEN, as DEFAULT_VALUE, Customer HTTP query parameters as DESCRIPTION), Stack trace (when copying this message, always include the lines below):

Eventually we found the related issues on GitHub:
https://github.com/dbeaver/dbeaver/issues/16885
https://github.com/ClickHouse/clickhouse-jdbc/pull/930


It turned out the SQL generated by the ClickHouse JDBC driver was wrong.


ClickHouse Java Client as DEFAULT_VALUE should have been 'ClickHouse Java Client' as DEFAULT_VALUE; the string literal needs quotes.


We were using version 0.3.2 of the JDBC driver:

<dependency>
    <groupId>ru.yandex.clickhouse</groupId>
    <artifactId>clickhouse-jdbc</artifactId>
    <version>0.3.2</version>
</dependency>

Changing the ClickHouse JDBC package to 0.3.2-patch-1 fixes the problem.
An even newer driver would also work; choose whichever suits you.


+ 使用k8s 搭建redis 集群 +

+ + +
+ + + + +
+ + +

背景

使用k8s 搭建redis集群

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
root@redis-app-0:/data# redis-cli 
127.0.0.1:6379> info cluster
# Cluster
cluster_enabled:1
127.0.0.1:6379> cluster meet 10.42.0.35 6379
OK
127.0.0.1:6379> cluster meet 10.42.0.36 6379
OK
127.0.0.1:6379> cluster meet 10.42.0.37 6379
OK
127.0.0.1:6379> cluster meet 10.42.0.38 6379
OK
127.0.0.1:6379> cluster meet 10.42.0.39 6379
OK

Check the nodes:

127.0.0.1:6379> cluster nodes
f8d5dd6aef17c622f541ade32a95430421606f6c 10.42.0.39:6379 master - 0 1673791234512 0 connected
76af8c3c32cf535a3733ce75db2c3c6719c644fc 10.42.0.38:6379 master - 0 1673791234512 4 connected
7f803ec0f21e4382bb773285fd40286069b26075 10.42.0.36:6379 master - 0 1673791235012 2 connected
c48e86d680b74df9c70cb7369201fb2cbd8650be 10.42.0.34:6379 myself,master - 0 0 5 connected
3ab965513f345444689cdeb7358c51263772f454 10.42.0.35:6379 master - 0 1673791233512 1 connected
8e256e1614cf9f330404693b6f18785da5794fbc 10.42.0.37:6379 master - 0 1673791234011 3 connected
127.0.0.1:6379>


Assign the hash slots (note that redis-cli has to be run from the shell, not from inside the redis-cli prompt, which is why the first attempt below fails):

10.42.0.39:6379> cluster nodes
f8d5dd6aef17c622f541ade32a95430421606f6c 10.42.0.39:6379 myself,slave 76af8c3c32cf535a3733ce75db2c3c6719c644fc 0 0 0 connected
7f803ec0f21e4382bb773285fd40286069b26075 10.42.0.36:6379 master - 0 1673791666954 2 connected
8e256e1614cf9f330404693b6f18785da5794fbc 10.42.0.37:6379 slave 7f803ec0f21e4382bb773285fd40286069b26075 0 1673791668456 3 connected
76af8c3c32cf535a3733ce75db2c3c6719c644fc 10.42.0.38:6379 master - 0 1673791667955 4 connected
3ab965513f345444689cdeb7358c51263772f454 10.42.0.35:6379 slave c48e86d680b74df9c70cb7369201fb2cbd8650be 0 1673791668955 5 connected
c48e86d680b74df9c70cb7369201fb2cbd8650be 10.42.0.34:6379 master - 0 1673791666954 5 connected
10.42.0.39:6379> redis-cli -h 10.42.0.34 -p 6379 cluster addslots {0..5461}
(error) ERR unknown command 'redis-cli'
10.42.0.39:6379> exit
root@redis-app-0:/data# redis-cli -h 10.42.0.34 -p 6379 cluster addslots {0..5461}
OK
root@redis-app-0:/data# redis-cli -h 10.42.0.36 -p 6379 cluster addslots {5462..10922}
OK
root@redis-app-0:/data# redis-cli -h 10.42.0.38 -p 6379 cluster addslots {10923..16383}


Resulting cluster state:

127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:6
cluster_size:3
cluster_current_epoch:5
cluster_my_epoch:5
cluster_stats_messages_sent:3134
cluster_stats_messages_received:3134
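
To sanity-check the cluster from application code, a small client test helps. A minimal sketch using Jedis (the seed address below is just one of the pod IPs from the session above and will differ in your cluster):

import redis.clients.jedis.HostAndPort;
import redis.clients.jedis.JedisCluster;

public class ClusterSmokeTest {
    public static void main(String[] args) {
        // Any reachable cluster node works as a seed; slot routing is discovered automatically.
        try (JedisCluster cluster = new JedisCluster(new HostAndPort("10.42.0.34", 6379))) {
            cluster.set("hello", "world");
            System.out.println(cluster.get("hello")); // expected: world
        }
    }
}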


Related reading

diff --git a/2023/01/30/Garbage-First-Garbage-Collection-简单概况/index.html (new file)

Garbage-First Garbage Collection: A Brief Overview

Garbage-First Garbage Collection

Garbage-First, usually shortened to G1, is one of the garbage collectors currently selectable in Java.

Goal: maintain overall throughput while meeting a soft real-time pause-time constraint.
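
In practice that soft real-time constraint is expressed as a pause-time target: G1 is enabled with -XX:+UseG1GC and given a target via -XX:MaxGCPauseMillis. As a minimal sketch (not from the original post), the following program lists the collectors the running JVM is using; under G1 the reported names are typically "G1 Young Generation" and "G1 Old Generation":

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class ShowCollectors {
    public static void main(String[] args) {
        // Run with: java -XX:+UseG1GC -XX:MaxGCPauseMillis=50 ShowCollectors
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            System.out.println(gc.getName() + ", collections so far: " + gc.getCollectionCount());
        }
    }
}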

How the algorithm is put together:

  • Equal-sized heap regions: the whole heap is divided into regions of the same size.

The Garbage-First collector achieves these goals via several techniques. The heap is partitioned into a set of equal-sized heap regions, much like the train cars of the Mature-Object Space collector of Hudson and Moss [22]. However, whereas the remembered sets of the Mature-Object Space collector are unidirectional, recording pointers from older regions to younger but not vice versa, Garbage-First remembered sets record pointers from all regions (with some exceptions, described in sections 2.4 and 4.6). Recording all references allows an arbitrary set of heap regions to be chosen for collection. A concurrent thread processes log records created by special mutator write barriers to keep remembered sets up-to-date, allowing shorter collections.

Source code analysis

  • Young GC: the entry point is G1YoungCollector::collect:
void G1YoungCollector::collect() {
// Do timing/tracing/statistics/pre- and post-logging/verification work not
// directly related to the collection. They should not be accounted for in
// collection work timing.

// The G1YoungGCTraceTime message depends on collector state, so must come after
// determining collector state.
G1YoungGCTraceTime tm(this, _gc_cause);

// JFR
G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
// JStat/MXBeans
G1MonitoringScope ms(monitoring_support(),
false /* full_gc */,
collector_state()->in_mixed_phase() /* all_memory_pools_affected */);
// Create the heap printer before internal pause timing to have
// heap information printed as last part of detailed GC log.
G1HeapPrinterMark hpm(_g1h);
// Young GC internal pause timing
G1YoungGCNotifyPauseMark npm(this);

// Verification may use the workers, so they must be set up before.
// Individual parallel phases may override this.
set_young_collection_default_active_worker_threads();

// Wait for root region scan here to make sure that it is done before any
// use of the STW workers to maximize cpu use (i.e. all cores are available
// just to do that).
wait_for_root_region_scanning();

G1YoungGCVerifierMark vm(this);
{
// Actual collection work starts and is executed (only) in this scope.

// Young GC internal collection timing. The elapsed time recorded in the
// policy for the collection deliberately elides verification (and some
// other trivial setup above).
policy()->record_young_collection_start();

calculate_collection_set(jtm.evacuation_info(), _target_pause_time_ms);

G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
G1PreservedMarksSet preserved_marks_set(workers()->active_workers());
G1ParScanThreadStateSet per_thread_states(_g1h,
&rdcqs,
&preserved_marks_set,
workers()->active_workers(),
collection_set()->young_region_length(),
collection_set()->optional_region_length(),
&_evac_failure_regions);

pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
// Actually do the work...
evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

if (may_do_optional_evacuation) {
evacuate_optional_collection_set(&per_thread_states);
}
post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

// Refine the type of a concurrent mark operation now that we did the
// evacuation, eventually aborting it.
_concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");

// Need to report the collection pause now since record_collection_pause_end()
// modifies it to the next state.
jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));

policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_failed());
}
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
}

The code that actually copies (evacuates) live objects:

evacuate_live_objects(


MAYBE_INLINE_EVACUATION
void G1ParScanThreadState::dispatch_task(ScannerTask task) {
verify_task(task);
if (task.is_narrow_oop_ptr()) {
do_oop_evac(task.to_narrow_oop_ptr());
} else if (task.is_oop_ptr()) { //oop 复制
do_oop_evac(task.to_oop_ptr());
} else {
do_partial_array(task.to_partial_array_task());
}
}

Call stack:

Thread 23 "GC Thread#4" hit Breakpoint 1, G1ParScanThreadState::do_copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...) at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:443
443 assert(region_attr.is_in_cset(),
(gdb) bt
#0 G1ParScanThreadState::do_copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...) at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:443
#1 0x00007ffff64ab3f6 in G1ParScanThreadState::copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...)
at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:555
#2 0x00007ffff64de15e in G1ParCopyClosure<(G1Barrier)0, false>::do_oop_work<oopDesc*> (this=0x7fff7c001478, p=0x7ffff02e1cc8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp:240
#3 0x00007ffff64dcbc3 in G1ParCopyClosure<(G1Barrier)0, false>::do_oop (this=0x7fff7c001478, p=0x7ffff02e1cc8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1OopClosures.hpp:167
#4 0x00007ffff6546dd8 in chunk_oops_do (f=0x7fff7c001478, chunk=0x7ffff02e1cb0, chunk_top=0x7ffff02e1cd0 "\350\034\200\026\a") at /home/dai/jdk/src/hotspot/share/runtime/handles.cpp:100
#5 0x00007ffff6546e23 in HandleArea::oops_do (this=0x7ffff02e1c30, f=0x7fff7c001478) at /home/dai/jdk/src/hotspot/share/runtime/handles.cpp:108
#6 0x00007ffff6d85dd4 in Thread::oops_do_no_frames (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:550
#7 0x00007ffff6d8a513 in JavaThread::oops_do_no_frames (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:1968
#8 0x00007ffff6d85e28 in Thread::oops_do (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:580
#9 0x00007ffff6d91359 in ParallelOopsDoThreadClosure::do_thread (this=0x7fff87dfaa00, t=0x7ffff02e1160) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:3620
#10 0x00007ffff6d8c40b in Threads::possibly_parallel_threads_do (is_par=true, tc=0x7fff87dfaa00) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2545
#11 0x00007ffff6d8eac8 in Threads::possibly_parallel_oops_do (is_par=true, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:3626
#12 0x00007ffff64dea53 in G1RootProcessor::process_java_roots (this=0x7fffc9723df0, closures=0x7fff7c001470, phase_times=0x7fffb8001380, worker_id=1)
at /home/dai/jdk/src/hotspot/share/gc/g1/g1RootProcessor.cpp:183
#13 0x00007ffff64de78e in G1RootProcessor::evacuate_roots (this=0x7fffc9723df0, pss=0x7fff7c000d90, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1RootProcessor.cpp:60
#14 0x00007ffff64f06b8 in G1EvacuateRegionsTask::scan_roots (this=0x7fffc9723f50, pss=0x7fff7c000d90, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1YoungCollector.cpp:706
#15 0x00007ffff64f0632 in G1EvacuateRegionsBaseTask::work (this=0x7fffc9723f50, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1YoungCollector.cpp:693
#16 0x00007ffff6e8bb7c in WorkerTaskDispatcher::worker_run_task (this=0x7ffff00a4c88) at /home/dai/jdk/src/hotspot/share/gc/shared/workerThread.cpp:67
#17 0x00007ffff6e8c074 in WorkerThread::run (this=0x7fffb800df30) at /home/dai/jdk/src/hotspot/share/gc/shared/workerThread.cpp:159
#18 0x00007ffff6d8557f in Thread::call_run (this=0x7fffb800df30) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:358
#19 0x00007ffff6acc1e7 in thread_native_entry (thread=0x7fffb800df30) at /home/dai/jdk/src/hotspot/os/linux/os_linux.cpp:705
#20 0x00007ffff7c94ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#21 0x00007ffff7d26a40 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81


The memory copy itself:

static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
#ifdef AMD64
switch (count) {
case 8: to[7] = from[7];
case 7: to[6] = from[6];
case 6: to[5] = from[5];
case 5: to[4] = from[4];
case 4: to[3] = from[3];
case 3: to[2] = from[2];
case 2: to[1] = from[1];
case 1: to[0] = from[0];
case 0: break;
default:
(void)memcpy(to, from, count * HeapWordSize);
break;
}
#else
// Includes a zero-count check.
intx temp;
__asm__ volatile(" testl %6,%6 ;"
" jz 3f ;"
" cmpl $32,%6 ;"
" ja 2f ;"
" subl %4,%1 ;"
"1: movl (%4),%3 ;"
" movl %7,(%5,%4,1);"
" addl $4,%0 ;"
" subl $1,%2 ;"
" jnz 1b ;"
" jmp 3f ;"
"2: rep; smovl ;"
"3: nop "
: "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
: "0" (from), "1" (to), "2" (count), "3" (temp)
: "memory", "cc");
#endif // AMD64
}

Related reading

diff --git a/2023/02/04/Double-Checked-Locking-is-Broken/index.html (new file)

Double-Checked Locking is Broken

Background

Double-checked locking comes up when we construct a singleton lazily.

Usage

For a singleton, the instance has to be shared across threads, so the pattern checks the field once without locking and again inside a synchronized block; without volatile (or an equivalent memory-ordering guarantee) the idiom is broken.
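
A minimal sketch of the corrected idiom (the class name is made up for illustration); removing volatile here reintroduces the bug, because another thread could then see a non-null reference to a partially constructed object:

public class Singleton {
    private static volatile Singleton instance;

    private Singleton() {}

    public static Singleton getInstance() {
        Singleton local = instance;          // first (unsynchronized) check
        if (local == null) {
            synchronized (Singleton.class) {
                local = instance;            // second check, under the lock
                if (local == null) {
                    instance = local = new Singleton();
                }
            }
        }
        return local;
    }
}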

Related reading

diff --git a/2023/02/13/jstak/index.html (new file)

Using jstack

Background

Troubleshooting a production incident: locate the thread that is misbehaving.

Using top

Running top -p PID -H
lists the threads of that process together with their CPU usage; take the TID of the hot thread, convert it to hex (printf '%x\n' TID), and look for the matching nid=0x... entry in the jstack output.
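
To practice the workflow, you can start a deliberately busy thread and then locate it with top -H -p <pid> plus jstack. A minimal sketch (the thread name is chosen only for illustration):

public class BusyThreadDemo {
    public static void main(String[] args) {
        Thread t = new Thread(() -> {
            long x = 0;
            while (true) {   // burns one core, so it shows up at the top of `top -H`
                x++;
            }
        }, "busy-worker");
        t.start();
    }
}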

Using the Arthas thread command

Arthas' thread command gives a similar per-thread view: https://arthas.aliyun.com/doc/thread.html

Related reading

diff --git a/2023/02/14/java-常用命令/index.html (new file)

Commonly used Java commands

diff --git a/2023/02/21/clickhouse-mybatis-batch-insert-cpu-raise-up/index.html (new file)

clickhouse mybatis batch insert cpu raise up

Background

Using the ClickHouse JDBC driver for batched inserts, we saw CPU usage rise sharply, to over 90%.

Investigation and root cause

Environment

JDK: 11
ClickHouse JDBC driver:

<groupId>com.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>0.3.2-patch11</version>

Root cause:
the SQL was assembled with MyBatis-Plus, writing 2000 rows per batch, and the statement was built with a MyBatis XML foreach, roughly like this:

insert into table values (row1_field1, row1_field2), (row2_field1, row2_field2), ...   -- the values list is expanded by the MyBatis XML foreach, 2000 times

Warning from the production logs:

Please consider to use one and only one values expression, for example: use 'values(?)' instead of 'values(?),(?)'.

Because the driver's SQL parser is slow on such a long statement, the prepared statement needs to be rewritten into the following form:

insert into table values (?, ?)   ## the values clause appears only once

We switched to the addBatch style shown in the driver's documentation:

// Note: "insert into table values(?,?)" is treated as "insert into mytable"
try (PreparedStatement ps = conn.prepareStatement("insert into table values(?,?)")) {
ps.setString(1, "test"); // id
ps.setObject(2, LocalDateTime.now()); // timestamp
ps.addBatch(); // append parameters to the query
...
ps.executeBatch(); // issue the composed query: insert into mytable values(...)(...)...(...)
}

Result

CPU usage dropped from around 80% to under 30%.

[Figure: CPU usage before the change]

[Figure: CPU usage after the change]

Related reading

diff --git a/2023/02/28/utf8-encoding-and-java/index.html (new file)

utf8 and utf16 and encoding and java

Background

Java strings involve quite a few encoding-related questions, so this post collects them in one place.

Key concepts

Code Unit

A code unit is the smallest unit of a given encoding (note that a single character / code point may consist of more than one code unit).

Encoding | code unit size
utf-8    | 1 byte
utf-16   | 2 bytes

Java's char

A Java char is 2 bytes (one UTF-16 code unit); its value range is 0 to 2^16 - 1.
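
Because a char is a single UTF-16 code unit, a character outside the Basic Multilingual Plane occupies two chars (a surrogate pair), while its UTF-8 form takes four bytes. A minimal sketch:

import java.nio.charset.StandardCharsets;

public class CodeUnitDemo {
    public static void main(String[] args) {
        String s = "\uD83D\uDE00"; // U+1F600, one code point stored as a surrogate pair
        System.out.println(s.length());                                 // 2 -> two UTF-16 code units (chars)
        System.out.println(s.codePointCount(0, s.length()));            // 1 -> one code point
        System.out.println(s.getBytes(StandardCharsets.UTF_8).length);  // 4 -> four UTF-8 bytes
    }
}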


diff --git a/2023/03/09/each-jvm-bytecode-implement-in-x86-with-asm/index.html (new file)

each jvm bytecode implement in x86 with asm

Background

I want to understand how the JVM implements each bytecode in assembly; the target platform is x86.

Assembly syntax

The same assembly statement, writing the constant 1 into a register, can be expressed in two syntaxes:

Description                               | Intel            | AT&T
Write 1 into the eax register             | mov eax,1        | movl $1,%eax
Write the value at address ebx+3 into eax | mov eax,[ebx+3]  | movl 3(%ebx),%eax

Stack frame

In 64-bit x86 mode, rbcp is r13 and points at the next bytecode to execute (bcp = bytecode pointer), while rlocals is r14 and holds the base address of the local variables:
// Global Register Names
static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);

Here LP64_ONLY() and NOT_LP64() are selected by the _LP64 macro:

__LP64__
_LP64
These macros are defined, with value 1, if (and only if) the compilation is for a target where long int and pointer both use 64-bits and int uses 32-bit.

Registers on amd64

The Java stack frame:

Register | Meaning
r14      | base address of the local variables
r13      | pointer to the next bytecode to execute

Similar to a C stack, the Java interpreter stack looks like this:

[Figure: interpreter stack layout]
Related reading

The frame is described by the structure below; its main members are:
  • _sp: points to the stack
  • _pc: points to the next instruction (program counter)
    jdk/src/hotspot/share/runtime/frame.hpp
    class frame {
    private:
    // Instance variables:
    intptr_t* _sp; // stack pointer (from Thread::last_Java_sp) , java 的stack 指针
    address _pc; // program counter (the next instruction after the call) 下一个指令的指针

    CodeBlob* _cb; // CodeBlob that "owns" pc
    enum deopt_state {
    not_deoptimized,
    is_deoptimized,
    unknown
    };

    deopt_state _deopt_state;

    ...

    };

bytecode

The TosState enum describes what is cached in the top-of-stack register:
enum TosState {         // describes the tos cache contents
btos = 0, // byte, bool tos cached
ztos = 1, // byte, bool tos cached
ctos = 2, // char tos cached
stos = 3, // short tos cached
itos = 4, // int tos cached
ltos = 5, // long tos cached
ftos = 6, // float tos cached
dtos = 7, // double tos cached
atos = 8, // object cached
vtos = 9, // tos not cached
number_of_states,
ilgl // illegal state: should not occur
};

iload

bytecode | enum
iload    | 21 (0x15)

The generated x86 code for iload, dumped in gdb:
$65 = (address) 0x7fffe1012693 "A\017\266]\002\203\373\025\017\204J"
(gdb) x/20i 0x7fffe1012693
0x7fffe1012693: movzbl 0x2(%r13),%ebx
0x7fffe1012698: cmp $0x15,%ebx <--- 下一个bytecode
0x7fffe101269b: je 0x7fffe10126eb <-- 跳转到 done
0x7fffe10126a1: cmp $0xe0,%ebx <-- 判断下一个是否是_fast_iload
0x7fffe10126a7: mov $0xe1,%ecx <------ 下一个是_fast_iload 则重写成fast_iload2
0x7fffe10126ac: je 0x7fffe10126bd <-------- 跳转到rewrite label
0x7fffe10126ae: cmp $0x34,%ebx
0x7fffe10126b1: mov $0xe2,%ecx
0x7fffe10126b6: je 0x7fffe10126bd
0x7fffe10126b8: mov $0xe0,%ecx
0x7fffe10126bd: movzbl 0x0(%r13),%ebx
0x7fffe10126c2: cmp $0x15,%ebx
0x7fffe10126c5: je 0x7fffe10126e7
0x7fffe10126cb: cmp %ecx,%ebx
0x7fffe10126cd: je 0x7fffe10126e7
0x7fffe10126d3: movabs $0x7ffff74ef9d7,%rdi
0x7fffe10126dd: and $0xfffffffffffffff0,%rsp
0x7fffe10126e1: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe10126e6: hlt
0x7fffe10126e7: mov %cl,0x0(%r13)


Source code analysis

The template generator for iload (this particular listing appears to be the aarch64 TemplateTable, note the r1/r4 registers and cmpw/br, but the x86 generator is structured the same way):
void TemplateTable::iload_internal(RewriteControl rc) {
transition(vtos, itos);
if (RewriteFrequentPairs && rc == may_rewrite) {
Label rewrite, done;
Register bc = r4;

// get next bytecode
__ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

// if _iload, wait to rewrite to iload2. We only want to rewrite the
// last two iloads in a pair. Comparing against fast_iload means that
// the next bytecode is neither an iload or a caload, and therefore
// an iload pair.
__ cmpw(r1, Bytecodes::_iload); <--- 下一个bytecode
__ br(Assembler::EQ, done); <---- 跳转到done

// if _fast_iload rewrite to _fast_iload2
__ cmpw(r1, Bytecodes::_fast_iload); <-- 判断下一个是否是_fast_iload
__ movw(bc, Bytecodes::_fast_iload2); <------ 下一个是_fast_iload 则重写成fast_iload2
__ br(Assembler::EQ, rewrite); <-------- 跳转到rewrite label

// if _caload rewrite to _fast_icaload
__ cmpw(r1, Bytecodes::_caload);
__ movw(bc, Bytecodes::_fast_icaload);
__ br(Assembler::EQ, rewrite);

// else rewrite to _fast_iload
__ movw(bc, Bytecodes::_fast_iload);

// rewrite
// bc: new bytecode
__ bind(rewrite);
patch_bytecode(Bytecodes::_iload, bc, r1, false);
__ bind(done);

}

// do iload, get the local value into tos
locals_index(r1);
__ ldr(r0, iaddress(r1));

}

aconst_null

bytecode    | description                          | enum
aconst_null | push a null reference onto the stack | 0x01
void TemplateTable::aconst_null() {
transition(vtos, atos);
__ xorl(rax, rax); // rax is the cached top-of-stack value; null is represented as 0
}

istore

bytecode | description                        | enum
istore   | store an int into a local variable | 54 (0x36)

This bytecode is a good place to see how local variables are accessed:
void TemplateTable::istore() {
  transition(itos, vtos);      // only an assertion: state itos before, vtos after; the real transition is defined by def()
  locals_index(rbx);           // write the local-variable index (the offset) into rbx
  __ movl(iaddress(rbx), rax); // iaddress is rlocals + rbx, i.e. the address of that local slot; store rax there
}

Here iaddress(rbx) is rlocals + rbx, i.e. an offset relative to the local-variable base:

static inline Address iaddress(Register r) {
return Address(rlocals, r, Address::times_ptr);
}

The iaddress helper builds an Address (the Address class lives in src/hotspot/cpu/x86/assembler_x86.hpp); the call chain is iaddress -> Address:

static inline Address iaddress(Register r) {
return Address(rlocals, r, Address::times_ptr);
}
Address(Register base, Register index, ScaleFactor scale, int disp = 0)
: _base (base),
_index(index),
_xmmindex(xnoreg),
_scale(scale),
_disp (disp),
_isxmmindex(false) {
assert(!index->is_valid() == (scale == Address::no_scale),
"inconsistent address");
}
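
To see where iload/istore come from at the Java level, compile a trivial method and disassemble it with javap -c. A minimal sketch (the expected instructions are noted in the comment; exact slot numbers depend on the method shape):

public class LocalVarDemo {
    // javap -c LocalVarDemo should show roughly:
    //   iload_0, iload_1, iadd, istore_2, iload_2, ireturn
    // i.e. load the two argument slots, add them, store into local slot 2, reload it and return.
    static int add(int a, int b) {
        int c = a + b;
        return c;
    }
}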

Expanding def for istore

We saw transition(itos, vtos) above; that call is only an assert-style check, and the real in/out states are handled by def():

def(Bytecodes::_istore              , ubcp|____|clvm|____, itos, vtos, istore              ,  _           );


Expanding def step by step, it eventually reaches:

void TemplateTable::def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(int arg), int arg) {
...
Template* t = is_wide ? template_for_wide(code) : template_for(code);
// setup entry
t->initialize(flags, in, out, gen, arg);
assert(t->bytecode() == code, "just checkin'");
}

The in and out states are then consumed by TemplateInterpreterGenerator::generate_and_dispatch:


//------------------------------------------------------------------------------------------------------------------------

void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
#ifndef PRODUCT
// debugging code
if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
if (PrintBytecodeHistogram) histogram_bytecode(t);
if (PrintBytecodePairHistogram) histogram_bytecode_pair(t);
if (TraceBytecodes) trace_bytecode(t);
if (StopInterpreterAt > 0) stop_interpreter_at();
__ verify_FPU(1, t->tos_in());
#endif // !PRODUCT
int step = 0;
if (!t->does_dispatch()) {
step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
if (tos_out == ilgl) tos_out = t->tos_out();
// compute bytecode size
assert(step > 0, "just checkin'");
// setup stuff for dispatching next bytecode
if (ProfileInterpreter && VerifyDataPointer
&& MethodData::bytecode_has_profile(t->bytecode())) {
__ verify_method_data_pointer();
}
__ dispatch_prolog(tos_out, step);
}
// generate template
t->generate(_masm);
// advance
if (t->does_dispatch()) {
#ifdef ASSERT
// make sure execution doesn't go beyond this point if code is broken
__ should_not_reach_here();
#endif // ASSERT
} else {
// dispatch to next bytecode
__ dispatch_epilog(tos_out, step);
}
}

References

diff --git a/2023/03/23/tersorflow-入门/index.html (new file)

Getting started with TensorFlow

Background

ChatGPT has been all the rage lately, so I wanted to understand how TensorFlow fits data.

Python version

$python3 -V
Python 3.10.6

Installation

Installation can be done either with pip or via Jupyter; I went with pip:
+
  • Download and install Miniconda:

    curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o Miniconda3-latest-Linux-x86_64.sh
    bash Miniconda3-latest-Linux-x86_64.sh

  • Create an environment called tf:

    ## my python is 3.10, adjust for your own version
    conda create --name tf python=3.10

  • Install TensorFlow:

    (tf) dai@myhost:~$ pip install tensorflow==2.11.*

  • Smoke-test the installation:

    (tf) dai@myhost:~$ python3 -c "import tensorflow as tf; print(tf.reduce_sum(tf.random.normal([1000, 1000])))"

Related reading


diff --git a/2023/03/29/java-unsafe/index.html (new file)

java unsafe

Background

Java's Unsafe class exposes many low-level APIs; Netty, for example, uses them heavily.

Example

Below is a use of the allocateMemory method from Unsafe:

import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class UnsafeDemo {

public static void main(String[] args) {
var unsafe = getUnsafe();
var memory = unsafe.allocateMemory(100);
System.out.println(memory);
}

private static Unsafe getUnsafe() {
try {
Field field = Unsafe.class.getDeclaredField("theUnsafe");
field.setAccessible(true);
return (Unsafe) field.get(null);
} catch (Exception e) {
return null;
}
}
}

Here memory is the raw native address that was allocated.
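
Memory obtained this way lives outside the Java heap, so it has to be written and read through Unsafe as well, and it must be released manually. A minimal sketch along the same lines (the class name is chosen for illustration):

import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class UnsafeReadWriteDemo {
    public static void main(String[] args) throws Exception {
        Field field = Unsafe.class.getDeclaredField("theUnsafe");
        field.setAccessible(true);
        Unsafe unsafe = (Unsafe) field.get(null);

        long addr = unsafe.allocateMemory(100);   // raw native address, outside the Java heap
        unsafe.putByte(addr, (byte) 42);          // write one byte at that address
        System.out.println(unsafe.getByte(addr)); // prints 42
        unsafe.freeMemory(addr);                  // never garbage collected, must be freed explicitly
    }
}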

Under the hood this calls Unsafe_AllocateMemory0 in the JVM:

UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
size_t sz = (size_t)size;

assert(is_aligned(sz, HeapWordSize), "sz not aligned");

void* x = os::malloc(sz, mtOther);

return addr_to_java(x);
} UNSAFE_END

Ultimately it calls glibc's malloc via os::malloc:

void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

// Special handling for NMT preinit phase before arguments are parsed
void* rc = NULL;
if (NMTPreInit::handle_malloc(&rc, size)) {
// No need to fill with 0 because DumpSharedSpaces doesn't use these
// early allocations.
return rc;
}

DEBUG_ONLY(check_crash_protection());

// On malloc(0), implementations of malloc(3) have the choice to return either
// NULL or a unique non-NULL pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);

// For the test flag -XX:MallocMaxTestWords
if (has_reached_max_malloc_test_peak(size)) {
return NULL;
}

const size_t outer_size = size + MemTracker::overhead_per_malloc();

// Check for overflow.
if (outer_size < size) {
return NULL;
}

ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);) <-- malloc 分配内存
if (outer_ptr == NULL) {
return NULL;
}

void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);

if (DumpSharedSpaces) {
// Need to deterministically fill all the alignment gaps in C++ structures.
::memset(inner_ptr, 0, size);
} else {
DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
}
DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
return inner_ptr;
}

Related reading

diff --git a/2023/04/03/java-基本类型/index.html (new file)

Java primitive types

Background

Get to know Java's primitive types: their sizes and their value ranges.
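
The sizes and ranges can be checked directly against the constants on the wrapper classes. A minimal sketch:

public class PrimitiveRanges {
    public static void main(String[] args) {
        System.out.println("byte : " + Byte.SIZE + " bits, " + Byte.MIN_VALUE + " .. " + Byte.MAX_VALUE);
        System.out.println("short: " + Short.SIZE + " bits, " + Short.MIN_VALUE + " .. " + Short.MAX_VALUE);
        System.out.println("char : " + Character.SIZE + " bits, 0 .. " + (int) Character.MAX_VALUE);
        System.out.println("int  : " + Integer.SIZE + " bits, " + Integer.MIN_VALUE + " .. " + Integer.MAX_VALUE);
        System.out.println("long : " + Long.SIZE + " bits, " + Long.MIN_VALUE + " .. " + Long.MAX_VALUE);
    }
}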

Platform: amd64

Source code analysis

In the C++ standard:

Type          | Signedness
char          | implementation-defined (char is a distinct type whose underlying type is an implementation-defined choice of signed char or unsigned char)
signed char   | signed
short int     | signed
int           | signed
long int      | signed
long long int | signed

On 64-bit Linux the JNI typedefs map the Java primitive types as follows:

Java/JNI type | C/C++ type
jint          | int
jlong         | long
jbyte         | signed char
jboolean      | unsigned char
jchar         | unsigned short
jfloat        | float
jdouble       | double
jsize         | jint (i.e. int)

// jdk/src/java.base/unix/native/include/jni_md.h
typedef int jint;
#ifdef _LP64
typedef long jlong;
#else
typedef long long jlong;
#endif

typedef signed char jbyte;
+ +
// jdk/src/java.base/share/native/include/jni.h
#ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H

typedef unsigned char jboolean;
typedef unsigned short jchar;
typedef short jshort;
typedef float jfloat;
typedef double jdouble;

typedef jint jsize;

Related reading

diff --git a/2023/04/09/direct-memory-in-java/index.html (new file)

direct memory in java

Background

Lucene uses direct memory, i.e. memory that is not managed by the JVM heap.

DirectByteBufferR is the read-only variant of DirectByteBuffer, so the two classes are nearly identical.
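
A read-only file mapping is the usual way such a buffer appears: FileChannel.map(READ_ONLY, ...) returns a MappedByteBuffer whose concrete class is the generated DirectByteBufferR, and judging from the stack trace below this is how Lucene obtains its buffers here. A minimal sketch (the file path is only an example taken from this post):

import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapDemo {
    public static void main(String[] args) throws Exception {
        Path p = Path.of("/home/dai/index/_7.cfs");
        try (FileChannel ch = FileChannel.open(p, StandardOpenOption.READ)) {
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            System.out.println(buf.getClass().getName()); // typically java.nio.DirectByteBufferR
            System.out.println(buf.get(0));               // read the first mapped byte
        }
    }
}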

Lucene's mmap

Lucene reads through a DirectByteBufferR; in this debug session the mapped address is 140063879776283:

main[1] dump receiver
receiver = {
$assertionsDisabled: true
java.nio.DirectByteBuffer.ARRAY_BASE_OFFSET: 16
java.nio.DirectByteBuffer.UNALIGNED: true
java.nio.DirectByteBuffer.att: instance of java.nio.DirectByteBufferR(id=1500)
java.nio.DirectByteBuffer.cleaner: null
java.nio.DirectByteBuffer.$assertionsDisabled: true
java.nio.MappedByteBuffer.fd: instance of java.io.FileDescriptor(id=1501)
java.nio.MappedByteBuffer.isSync: false
java.nio.MappedByteBuffer.SCOPED_MEMORY_ACCESS: instance of jdk.internal.misc.ScopedMemoryAccess(id=1502)
java.nio.ByteBuffer.ARRAY_BASE_OFFSET: 16
java.nio.ByteBuffer.hb: null
java.nio.ByteBuffer.offset: 0
java.nio.ByteBuffer.isReadOnly: true
java.nio.ByteBuffer.bigEndian: false
java.nio.ByteBuffer.nativeByteOrder: true
java.nio.ByteBuffer.$assertionsDisabled: true
java.nio.Buffer.UNSAFE: instance of jdk.internal.misc.Unsafe(id=1503)
java.nio.Buffer.SCOPED_MEMORY_ACCESS: instance of jdk.internal.misc.ScopedMemoryAccess(id=1502)
java.nio.Buffer.SPLITERATOR_CHARACTERISTICS: 16464
java.nio.Buffer.mark: -1
java.nio.Buffer.position: 0
java.nio.Buffer.limit: 7
java.nio.Buffer.capacity: 7
java.nio.Buffer.address: 140063879776283
java.nio.Buffer.segment: null
java.nio.Buffer.$assertionsDisabled: true
}
main[1] where
[1] org.apache.lucene.store.ByteBufferGuard.getByte (ByteBufferGuard.java:118)
[2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.readByte (ByteBufferIndexInput.java:593)
[3] org.apache.lucene.codecs.lucene90.Lucene90NormsProducer$3.longValue (Lucene90NormsProducer.java:388)
[4] org.apache.lucene.search.LeafSimScorer.getNormValue (LeafSimScorer.java:47)
[5] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
[6] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
[7] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
[8] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
[9] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
[10] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
[12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
[13] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
[14] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
[15] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
[16] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
[17] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Dump the _7.cfs file:

hexdump -C /home/dai/index/_7.cfs
00000000 3f d7 6c 17 14 4c 75 63 65 6e 65 39 30 43 6f 6d |?.l..Lucene90Com|
00000010 70 6f 75 6e 64 44 61 74 61 00 00 00 00 6b f0 66 |poundData....k.f|
00000020 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 00 00 |V..[...:2MK.....|
00000030 3f d7 6c 17 17 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
00000040 6c 64 73 49 6e 64 65 78 4d 65 74 61 00 00 00 01 |ldsIndexMeta....|
00000050 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 |k.fV..[...:2MK..|
00000060 00 80 80 05 07 00 00 00 0a 00 00 00 02 00 00 00 |................|
00000070 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |0...............|
00000080 00 00 e0 40 00 00 00 00 00 00 00 00 00 30 00 00 |...@.........0..|
00000090 00 00 00 00 00 36 00 00 00 00 00 00 00 00 00 5d |.....6.........]|
000000a0 43 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 |C.........0.....|
000000b0 00 00 13 01 00 00 00 00 00 00 01 01 07 c0 28 93 |..............(.|
000000c0 e8 00 00 00 00 00 00 00 00 46 80 fe 32 00 00 00 |.........F..2...|
000000d0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 69 |?.l..Lucene90Poi|
000000e0 6e 74 73 46 6f 72 6d 61 74 49 6e 64 65 78 00 00 |ntsFormatIndex..|
000000f0 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b |..k.fV..[...:2MK|
00000100 92 f8 00 32 c0 28 93 e8 00 00 00 00 00 00 00 00 |...2.(..........|
00000110 0a 2f 94 55 00 00 00 00 3f d7 6c 17 18 4c 75 63 |./.U....?.l..Luc|
00000120 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61 |ene90PointsForma|
00000130 74 4d 65 74 61 00 00 00 00 6b f0 66 56 c3 12 5b |tMeta....k.fV..[|
00000140 07 08 12 3a 32 4d 4b 92 f8 00 01 00 00 00 3f d7 |...:2MK.......?.|
00000150 6c 17 03 42 4b 44 00 00 00 09 01 01 80 04 08 01 |l..BKD..........|
00000160 80 00 01 81 b4 f6 00 0f 80 00 01 81 b5 2b 3d 9d |.............+=.|
00000170 07 07 01 32 00 00 00 00 00 00 00 33 00 00 00 00 |...2.......3....|
00000180 00 00 00 ff ff ff ff 44 00 00 00 00 00 00 00 72 |.......D.......r|
00000190 00 00 00 00 00 00 00 c0 28 93 e8 00 00 00 00 00 |........(.......|
000001a0 00 00 00 09 71 1c 79 00 3f d7 6c 17 12 42 6c 6f |....q.y.?.l..Blo|
000001b0 63 6b 54 72 65 65 54 65 72 6d 73 4d 65 74 61 00 |ckTreeTermsMeta.|
000001c0 00 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d |...k.fV..[...:2M|
000001d0 4b 92 f8 0a 4c 75 63 65 6e 65 39 30 5f 30 3f d7 |K...Lucene90_0?.|
000001e0 6c 17 1b 4c 75 63 65 6e 65 39 30 50 6f 73 74 69 |l..Lucene90Posti|
000001f0 6e 67 73 57 72 69 74 65 72 54 65 72 6d 73 00 00 |ngsWriterTerms..|
00000200 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b |..k.fV..[...:2MK|
00000210 92 f8 0a 4c 75 63 65 6e 65 39 30 5f 30 80 01 02 |...Lucene90_0...|
00000220 02 8c 01 0c db 01 03 62 af 05 67 cf 09 6d 95 14 |.......b..g..m..|
00000230 c2 02 a5 01 06 01 30 03 cd b1 69 37 3f d7 6c 17 |......0...i7?.l.|
00000240 03 46 53 54 00 00 00 08 01 0d 14 95 6d 09 cf 67 |.FST........m..g|
00000250 05 af 62 03 01 db 0c 00 00 01 00 07 02 a2 37 07 |..b...........7.|
00000260 07 16 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 |../home/dai/docs|
00000270 2f 61 61 61 2e 74 78 74 1f 2f 68 6f 6d 65 2f 64 |/aaa.txt./home/d|
00000280 61 69 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 77 72 |ai/docs/index/wr|
00000290 69 74 65 2e 6c 6f 63 6b 38 3f d7 6c 17 03 46 53 |ite.lock8?.l..FS|
000002a0 54 00 00 00 08 01 03 37 a2 02 00 00 01 49 00 00 |T......7.....I..|
000002b0 00 00 00 00 00 c9 07 00 00 00 00 00 00 c0 28 93 |..............(.|
000002c0 e8 00 00 00 00 00 00 00 00 c1 1b ff e4 00 00 00 |................|
000002d0 3f d7 6c 17 18 4c 75 63 65 6e 65 39 30 50 6f 69 |?.l..Lucene90Poi|
000002e0 6e 74 73 46 6f 72 6d 61 74 44 61 74 61 00 00 00 |ntsFormatData...|
000002f0 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 |.k.fV..[...:2MK.|
00000300 f8 00 07 10 00 01 00 06 00 04 00 00 00 05 00 03 |................|
00000310 00 02 00 04 80 00 01 81 00 b4 03 f6 00 0f f6 55 |...............U|
00000320 d3 f8 31 29 b5 04 2b 3d 81 2b 3d 81 2b 3d 85 2b |..1)..+=.+=.+=.+|
00000330 3d 9d c0 28 93 e8 00 00 00 00 00 00 00 00 28 e1 |=..(..........(.|
00000340 c0 de 00 00 00 00 00 00 3f d7 6c 17 19 4c 75 63 |........?.l..Luc|
00000350 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 |ene90PostingsWri|
00000360 74 65 72 50 6f 73 00 00 00 00 6b f0 66 56 c3 12 |terPos....k.fV..|
00000370 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 6e |[...:2MK...Lucen|
00000380 65 39 30 5f 30 1e 01 03 b1 01 06 0b 0b a4 01 05 |e90_0...........|
00000390 0d 21 06 02 0b a5 01 0c 0d 0a 0e 19 18 20 09 0b |.!........... ..|
000003a0 5f 19 45 06 30 08 0a 22 02 02 75 51 58 06 0b 03 |_.E.0.."..uQX...|
000003b0 05 03 07 7c 05 23 96 01 02 3c 54 30 37 01 11 18 |...|.#...<T07...|
000003c0 0a 40 30 29 0b 32 92 01 ae 01 03 1f 21 03 88 01 |.@0).2......!...|
000003d0 23 27 d5 01 73 18 0f 5f 07 3a 04 04 06 06 07 06 |#'..s.._.:......|
000003e0 12 19 38 04 00 72 0c 7d 52 3b 04 04 06 06 07 06 |..8..r.}R;......|
000003f0 25 06 38 04 11 46 3e 08 4c 42 11 10 0f 1f bc 01 |%.8..F>.LB......|
00000400 0b 1c 1a 06 8a 01 20 39 04 04 06 06 07 06 12 51 |...... 9.......Q|
00000410 04 c8 01 15 00 7e 44 06 06 07 06 17 06 08 06 04 |.....~D.........|
00000420 38 04 30 30 12 1d 05 07 19 06 05 02 00 05 05 06 |8.00............|
00000430 02 07 0c 05 07 31 05 2a 06 01 06 09 06 06 08 0b |.....1.*........|
00000440 04 00 00 1a 00 1c 0c d1 01 06 2f 07 60 07 15 06 |........../.`...|
00000450 01 01 cb 01 63 1a 26 a8 01 9f 01 13 06 2b 99 01 |....c.&......+..|
00000460 b4 01 01 68 28 09 d4 01 09 1b 0d 6f 0a 16 1b 10 |...h(......o....|
00000470 17 80 01 05 71 cf 01 d0 01 06 d2 01 06 17 1e 04 |....q...........|
00000480 05 0d 07 0c 05 07 31 05 2a 07 06 09 06 06 17 08 |......1.*.......|
00000490 04 04 0c 04 0d 12 2a 01 25 76 0e 07 0f 20 14 1e |......*.%v... ..|
000004a0 53 06 1e 08 a3 01 38 0a 0b a6 01 da 01 03 5e 2b |S.....8.......^+|
000004b0 c5 01 61 18 01 ba 01 38 03 05 0d 07 0c 05 07 31 |..a....8.......1|
000004c0 05 2a 07 06 09 06 06 17 03 04 04 03 03 02 05 0d |.*..............|
000004d0 07 0c 05 07 31 05 2a 07 06 09 06 06 17 02 08 02 |....1.*.........|
000004e0 02 c9 01 c0 28 93 e8 00 00 00 00 00 00 00 00 63 |....(..........c|
000004f0 69 b5 c7 00 00 00 00 00 3f d7 6c 17 15 4c 75 63 |i.......?.l..Luc|
00000500 65 6e 65 39 30 4e 6f 72 6d 73 4d 65 74 61 64 61 |ene90NormsMetada|
00000510 74 61 00 00 00 00 6b f0 66 56 c3 12 5b 07 08 12 |ta....k.fV..[...|
00000520 3a 32 4d 4b 92 f8 00 02 00 00 00 ff ff ff ff ff |:2MK............|
00000530 ff ff ff 00 00 00 00 00 00 00 00 ff ff ff 07 00 |................|
00000540 00 00 01 2b 00 00 00 00 00 00 00 ff ff ff ff c0 |...+............|
00000550 28 93 e8 00 00 00 00 00 00 00 00 72 ba cc 7e 00 |(..........r..~.|
00000560 3f d7 6c 17 12 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
00000570 6c 64 49 6e 66 6f 73 00 00 00 00 6b f0 66 56 c3 |ldInfos....k.fV.|
00000580 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 03 04 70 61 |.[...:2MK.....pa|
00000590 74 68 00 02 01 00 ff ff ff ff ff ff ff ff 02 1d |th..............|
000005a0 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e 67 73 |PerFieldPostings|
000005b0 46 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75 |Format.format.Lu|
000005c0 63 65 6e 65 39 30 1d 50 65 72 46 69 65 6c 64 50 |cene90.PerFieldP|
000005d0 6f 73 74 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75 |ostingsFormat.su|
000005e0 66 66 69 78 01 30 00 00 00 08 6d 6f 64 69 66 69 |ffix.0....modifi|
000005f0 65 64 01 00 00 00 ff ff ff ff ff ff ff ff 00 01 |ed..............|
00000600 01 08 00 00 08 63 6f 6e 74 65 6e 74 73 02 00 03 |.....contents...|
00000610 00 ff ff ff ff ff ff ff ff 02 1d 50 65 72 46 69 |...........PerFi|
00000620 65 6c 64 50 6f 73 74 69 6e 67 73 46 6f 72 6d 61 |eldPostingsForma|
00000630 74 2e 66 6f 72 6d 61 74 08 4c 75 63 65 6e 65 39 |t.format.Lucene9|
00000640 30 1d 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e |0.PerFieldPostin|
00000650 67 73 46 6f 72 6d 61 74 2e 73 75 66 66 69 78 01 |gsFormat.suffix.|
00000660 30 00 00 00 c0 28 93 e8 00 00 00 00 00 00 00 00 |0....(..........|
00000670 1f ee 84 f9 00 00 00 00 3f d7 6c 17 1c 4c 75 63 |........?.l..Luc|
00000680 65 6e 65 39 30 53 74 6f 72 65 64 46 69 65 6c 64 |ene90StoredField|
00000690 73 46 61 73 74 44 61 74 61 00 00 00 01 6b f0 66 |sFastData....k.f|
000006a0 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 00 1e |V..[...:2MK.....|
000006b0 00 01 08 18 1d 21 21 1d 1c 18 0a 13 0b 15 12 10 |.....!!.........|
000006c0 15 0f 15 12 10 15 12 a0 00 16 2f 68 6f 6d 65 2f |........../home/|
000006d0 64 61 f0 04 69 2f 64 6f 63 73 2f 62 62 62 2e 74 |da..i/docs/bbb.t|
000006e0 78 74 00 1b 2f 68 6f 01 05 00 e0 69 2f 64 6f 63 |xt../ho....i/doc|
000006f0 73 2f 69 6e 64 65 78 2f 5f 73 30 2e 63 66 73 00 |s/index/_s0.cfs.|
00000700 1f 0f 00 50 61 69 2f 64 6f f0 04 63 73 2f 69 6e |...Pai/do..cs/in|
00000710 64 65 78 2f 73 65 67 6d 65 6e 74 73 5f 31 24 00 |dex/segments_1$.|
00000720 1f 0a 00 90 69 2f 64 6f 63 73 2f 69 6e f0 04 64 |....i/docs/in..d|
00000730 65 78 2f 77 72 69 74 65 2e 6c 6f 63 6b 00 1b 2f |ex/write.lock../|
00000740 68 6f 01 05 00 e0 69 2f 64 6f 63 73 2f 69 6e 64 |ho....i/docs/ind|
00000750 65 78 2f 5f 73 30 2e 63 66 65 00 1a 0f 00 50 61 |ex/_s0.cfe....Pa|
00000760 69 2f 64 6f f0 04 63 73 2f 69 6e 64 65 78 2f 5f |i/do..cs/index/_|
00000770 30 2e 73 69 00 16 2f 68 6f 01 05 00 e0 69 2f 64 |0.si../ho....i/d|
00000780 6f 63 73 2f 61 61 61 2e 74 78 74 c0 28 93 e8 00 |ocs/aaa.txt.(...|
00000790 00 00 00 00 00 00 00 52 80 f1 02 00 00 00 00 00 |.......R........|
000007a0 3f d7 6c 17 13 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
000007b0 72 6d 73 49 6e 64 65 78 00 00 00 00 6b f0 66 56 |rmsIndex....k.fV|
000007c0 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 |..[...:2MK...Luc|
000007d0 65 6e 65 39 30 5f 30 00 00 c0 28 93 e8 00 00 00 |ene90_0...(.....|
000007e0 00 00 00 00 00 6e c7 b4 6e 00 00 00 00 00 00 00 |.....n..n.......|
000007f0 3f d7 6c 17 11 4c 75 63 65 6e 65 39 30 4e 6f 72 |?.l..Lucene90Nor|
00000800 6d 73 44 61 74 61 00 00 00 00 6b f0 66 56 c3 12 |msData....k.fV..|
00000810 5b 07 08 12 3a 32 4d 4b 92 f8 00 08 44 0e 00 21 |[...:2MK....D..!| <------------- here 08 44 0e 00 21
00000820 29 04 c0 28 93 e8 00 00 00 00 00 00 00 00 43 ab |)..(..........C.| <------------ 04 就是norm
00000830 9e 6c 00 00 00 00 00 00 3f d7 6c 17 19 4c 75 63 |.l......?.l..Luc|
00000840 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 |ene90PostingsWri|
00000850 74 65 72 44 6f 63 00 00 00 00 6b f0 66 56 c3 12 |terDoc....k.fV..|
00000860 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 6e |[...:2MK...Lucen|
00000870 65 39 30 5f 30 03 03 02 05 08 03 01 02 02 0b 03 |e90_0...........|
00000880 07 02 02 07 01 03 02 02 07 02 02 07 03 07 03 07 |................|
00000890 05 02 15 03 04 02 03 02 10 03 05 03 05 05 02 10 |................|
000008a0 02 03 05 03 02 10 02 02 05 03 c0 28 93 e8 00 00 |...........(....|
000008b0 00 00 00 00 00 00 8d fa 92 14 00 00 00 00 00 00 |................|
000008c0 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
000008d0 72 6d 73 44 69 63 74 00 00 00 00 6b f0 66 56 c3 |rmsDict....k.fV.|
000008e0 12 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 |.[...:2MK...Luce|
000008f0 6e 65 39 30 5f 30 36 84 0e 30 30 75 62 75 6e 74 |ne90_06..00ubunt|
00000900 75 30 2e 32 32 2e 30 34 2e 31 31 31 30 2e 30 2e |u0.22.04.1110.0.|
00000910 30 31 36 35 36 36 30 31 39 31 38 38 33 36 31 37 |0165660191883617|
00000920 2e 30 2e 33 31 65 31 69 31 6d 31 6d 31 6d 32 33 |.0.31e1i1m1m1m23|
00000930 33 33 35 2e 31 35 2e 30 36 37 39 5f 30 5f 30 5f |335.15.0679_0_0_|
00000940 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 5f 30 5f |lucene90field_0_|
00000950 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 73 69 6e |lucene90fieldsin|
00000960 64 65 78 5f 30 5f 6c 75 63 65 6e 65 39 30 66 69 |dex_0_lucene90fi|
00000970 65 6c 64 73 69 6e 64 65 78 66 69 6c 65 5f 70 6f |eldsindexfile_po|
00000980 69 6e 74 65 72 73 5f 31 5f 30 cb b9 5f 6c 75 63 |inters_1_0.._luc|
00000990 65 6e 65 39 30 5f 30 5f 6c 75 63 65 6e 65 39 30 |ene90_0_lucene90|
000009a0 66 69 65 6c 64 73 69 6e 64 65 78 61 61 61 61 2e |fieldsindexaaaa.|
000009b0 74 78 74 61 61 6d 62 6f 79 64 6f 67 6f 6f 64 69 |txtaamboydogoodi|
000009c0 69 73 6b 6e 6f 77 74 68 69 6e 67 77 68 61 74 79 |isknowthingwhaty|
000009d0 6f 75 61 6d 61 6d 64 36 34 36 01 10 01 06 0d 06 |ouamamd646......|
000009e0 08 02 01 01 02 06 01 01 01 02 10 16 25 04 0b 14 |............%...|
000009f0 01 07 1f 02 05 1c 02 04 02 01 04 00 03 02 02 03 |................|
00000a00 02 01 07 02 01 02 01 04 06 07 02 04 01 06 01 02 |................|
00000a10 02 05 3a 7a 01 3d 11 06 00 02 04 05 03 01 01 01 |..:z.=..........|
00000a20 01 0f 03 01 02 01 01 01 02 11 01 01 01 0f 01 11 |................|
00000a30 01 0f 02 00 02 08 01 08 01 01 01 01 05 01 09 01 |................|
00000a40 0b 05 00 01 08 01 05 01 03 15 01 03 01 38 b4 09 |.............8..|
00000a50 62 62 62 62 2e 74 78 74 62 65 73 74 5f 73 70 65 |bbbb.txtbest_spe|
00000a60 65 64 62 6b 64 62 6c 6f 63 6b 74 72 65 65 74 65 |edbkdblocktreete|
00000a70 72 6d 73 64 69 63 74 62 6c 6f 63 6b 74 72 65 65 |rmsdictblocktree|
00000a80 74 65 72 6d 73 69 6e 64 65 78 62 6c 6f 63 6b 74 |termsindexblockt|
00000a90 72 65 65 74 65 72 6d 73 6d 65 74 61 62 6f 79 62 |reetermsmetaboyb|
00000aa0 75 69 6c 64 63 63 66 65 63 66 73 63 6f 6e 74 65 |uildccfecfsconte|
00000ab0 6e 74 73 63 73 64 64 61 69 64 6f 64 6f 63 64 6f |ntscsddaidodocdo|
00000ac0 63 5f 64 6f 63 5f 69 64 73 5f 30 64 6f 63 73 65 |c_doc_ids_0docse|
00000ad0 75 66 66 64 6d 66 64 74 66 64 78 66 6c 75 73 68 |uffdmfdtfdxflush|
00000ae0 66 6e 6d 66 73 74 38 01 07 0a 03 12 13 12 03 05 |fnmfst8.........|
00000af0 01 03 03 08 02 01 03 02 03 04 09 04 03 03 03 03 |................|
00000b00 05 03 03 1b 04 00 02 01 0d 02 02 05 02 01 04 01 |................|
00000b10 02 0a 04 00 05 02 0a 01 04 01 04 01 05 02 01 3d |...............=|
00000b20 8e 01 77 04 01 02 11 02 0f 01 01 01 01 01 01 02 |..w.............|
00000b30 15 02 03 01 0f 01 11 04 01 01 0f 01 01 02 00 02 |................|
00000b40 06 01 03 00 0b 04 04 02 0b 01 01 01 01 01 01 0b |................|
00000b50 00 01 06 03 06 04 03 05 01 03 01 0b 01 50 cc 20 |.............P. |
00000b60 67 67 65 6e 65 72 69 63 67 6f 6f 64 68 68 6f 6d |ggenericgoodhhom|
00000b70 65 69 69 64 73 5f 30 69 6e 64 65 78 69 73 6a 6a |eiids_0indexisjj|
00000b80 61 76 61 2e 72 75 6e 74 69 6d 65 2e 76 65 72 73 |ava.runtime.vers|
00000b90 69 6f 6e 6a 61 76 61 2e 76 65 6e 64 6f 72 6a 61 |ionjava.vendorja|
00000ba0 76 61 2e 76 65 72 73 69 6f 6e 6a 61 76 61 2e 76 |va.versionjava.v|
00000bb0 6d 2e 76 65 72 73 69 6f 6e 6b 64 64 6b 64 69 6b |m.versionkddkdik|
00000bc0 64 6d 30 6b 6e 6f 77 6c 6c 69 6e 75 78 6c 75 63 |dm0knowllinuxluc|
00000bd0 65 6e 65 2e 76 65 72 73 69 6f 6e 6c 75 63 65 6e |ene.versionlucen|
00000be0 65 39 30 6c 75 63 65 6e 65 39 30 5f 30 6c 75 63 |e90lucene90_0luc|
00000bf0 65 6e 65 39 30 63 6f 6d 70 6f 75 6e 64 64 61 74 |ene90compounddat|
00000c00 61 6c 75 63 65 6e 65 39 30 63 6f 6d 70 6f 75 6e |alucene90compoun|
00000c10 64 65 6e 74 72 69 65 73 6c 75 63 65 6e 65 39 30 |dentrieslucene90|
00000c20 66 69 65 6c 64 69 6e 66 6f 73 6c 75 63 65 6e 65 |fieldinfoslucene|
00000c30 39 30 66 69 65 6c 64 73 69 6e 64 65 78 69 64 78 |90fieldsindexidx|
00000c40 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 73 69 6e |lucene90fieldsin|
00000c50 64 65 78 6d 65 74 61 6c 75 63 65 6e 65 39 30 6e |dexmetalucene90n|
00000c60 6f 72 6d 73 64 61 74 61 6c 75 63 65 6e 65 39 30 |ormsdatalucene90|
00000c70 6e 6f 72 6d 73 6d 65 74 61 64 61 74 61 6c 75 63 |normsmetadataluc|
00000c80 65 6e 65 39 30 70 6f 69 6e 74 73 66 6f 72 6d 61 |ene90pointsforma|
00000c90 74 64 61 74 61 6c 75 63 65 6e 65 39 30 70 6f 69 |tdatalucene90poi|
00000ca0 6e 74 73 66 6f 72 6d 61 74 69 6e 64 65 78 6c 75 |ntsformatindexlu|
00000cb0 63 65 6e 65 39 30 70 6f 69 6e 74 73 66 6f 72 6d |cene90pointsform|
00000cc0 61 74 6d 65 74 61 6c 75 63 65 6e 65 39 30 70 6f |atmetalucene90po|
00000cd0 73 74 69 6e 67 73 77 72 69 74 65 72 64 6f 63 6c |stingswriterdocl|
00000ce0 75 63 65 6e 65 39 30 70 6f 73 74 69 6e 67 73 77 |ucene90postingsw|
00000cf0 72 69 74 65 72 70 6f 73 6c 75 63 65 6e 65 39 30 |riterposlucene90|
00000d00 70 6f 73 74 69 6e 67 73 77 72 69 74 65 72 74 65 |postingswriterte|
00000d10 72 6d 73 6c 75 63 65 6e 65 39 30 73 65 67 6d 65 |rmslucene90segme|
00000d20 6e 74 69 6e 66 6f 6c 75 63 65 6e 65 39 30 73 74 |ntinfolucene90st|
00000d30 6f 72 65 64 66 69 65 6c 64 73 66 61 73 74 64 61 |oredfieldsfastda|
00000d40 74 61 6c 75 63 65 6e 65 39 30 73 74 6f 72 65 64 |talucene90stored|
00000d50 66 69 65 6c 64 73 66 6f 72 6d 61 74 2e 6d 6f 64 |fieldsformat.mod|
00000d60 65 6c 75 63 65 6e 65 39 33 50 01 07 04 01 04 01 |elucene93P......|
00000d70 05 05 02 01 14 0b 0c 0f 03 03 04 04 01 05 0e 08 |................|
00000d80 0a 14 17 12 16 17 11 15 18 19 18 19 19 1b 13 1c |................|
00000d90 1f 08 16 05 04 00 02 09 06 00 01 02 0a 01 02 01 |................|
00000da0 0f 08 15 03 02 01 02 05 21 56 a8 01 04 b9 01 05 |........!V......|
00000db0 01 13 01 00 01 04 01 03 00 0a 06 01 04 01 01 03 |................|
00000dc0 0b 05 01 11 02 01 01 01 01 01 01 03 01 01 01 01 |................|
00000dd0 01 0f 01 00 01 0c 05 19 01 01 0f 01 01 03 01 06 |................|
00000de0 0d 01 0b 01 01 02 01 01 01 01 01 01 01 02 01 02 |................|
00000df0 01 01 01 01 01 01 01 02 11 02 0f 01 11 01 0b 01 |................|
00000e00 5b a4 0f 6d 6f 64 69 66 69 65 64 6e 76 64 6e 76 |[..modifiednvdnv|
00000e10 6d 6f 6f 63 73 6f 73 6f 73 2e 61 72 63 68 6f 73 |moocsosos.archos|
00000e20 2e 76 65 72 73 69 6f 6e 70 70 61 69 70 61 74 68 |.versionppaipath|
00000e30 70 65 72 66 69 65 6c 64 70 6f 73 74 69 6e 67 73 |perfieldpostings|
00000e40 66 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 70 65 72 |format.formatper|
00000e50 66 69 65 6c 64 70 6f 73 74 69 6e 67 73 66 6f 72 |fieldpostingsfor|
00000e60 6d 61 74 2e 73 75 66 66 69 78 70 6f 73 70 72 69 |mat.suffixpospri|
00000e70 76 61 74 65 70 d7 99 70 d7 9b 70 d7 9c 71 71 78 |vatep..p..p..qqx|
00000e80 72 73 65 67 6d 65 6e 74 73 73 69 73 69 6e 64 65 |rsegmentssisinde|
00000e90 78 66 69 6c 65 5f 70 6f 69 6e 74 65 72 73 5f 31 |xfile_pointers_1|
00000ea0 73 6f 75 72 63 65 74 68 69 6e 67 74 69 6d 74 69 |sourcethingtimti|
00000eb0 6d 65 73 74 61 6d 70 74 69 70 78 74 6d 64 74 6d |mestamptipxtmdtm|
00000ec0 70 75 75 62 75 6e 74 75 76 78 77 63 77 68 61 74 |puubuntuvxwcwhat|
00000ed0 77 72 69 74 65 2e 6c 6f 63 6b 77 72 69 74 65 2e |write.lockwrite.|
00000ee0 6c 6f 63 6b 38 78 79 79 6f 75 79 6f 75 37 7a 7a |lock8xyyouyou7zz|
00000ef0 74 37 cb b9 cd b1 69 5a 08 03 03 01 03 02 07 0a |t7....iZ........|
00000f00 01 03 04 1d 1d 03 07 03 03 03 01 03 08 02 15 06 |................|
00000f10 05 03 09 04 03 03 01 06 02 02 04 0a 0b 01 01 03 |................|
00000f20 04 01 03 02 03 21 07 02 02 05 02 01 03 02 01 02 |.....!..........|
00000f30 01 03 08 0f 03 04 00 13 02 03 02 01 02 01 05 02 |................|
00000f40 01 0b 08 11 08 10 01 60 be 01 01 9e 02 0d 02 01 |.......`........|
00000f50 01 01 01 0b 01 11 03 01 01 01 01 0f 01 01 03 01 |................|
00000f60 01 01 02 01 03 0d 03 05 01 00 01 0a 02 13 01 01 |................|
00000f70 00 01 04 05 02 0b 01 0d 01 0f 01 11 01 13 01 11 |................|
00000f80 01 05 01 03 01 01 01 0b 01 01 04 11 03 0f 02 01 |................|
00000f90 02 03 02 05 01 01 02 01 02 0d 01 0f 01 05 01 01 |................|
00000fa0 02 00 01 0c 15 0c 01 14 0f d4 0b 2f 68 6f 6d 65 |.........../home|
00000fb0 2f 64 61 69 2f 64 6f 63 73 2f 61 61 61 2e 74 78 |/dai/docs/aaa.tx|
00000fc0 74 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 2f |t/home/dai/docs/|
00000fd0 62 62 62 2e 74 78 74 2f 68 6f 6d 65 2f 64 61 69 |bbb.txt/home/dai|
00000fe0 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 5f 30 2e 63 |/docs/index/_0.c|
00000ff0 66 65 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 |fe/home/dai/docs|
00001000 2f 69 6e 64 65 78 2f 5f 30 2e 63 66 73 2f 68 6f |/index/_0.cfs/ho|
00001010 6d 65 2f 64 61 69 2f 64 6f 63 73 2f 69 6e 64 65 |me/dai/docs/inde|
00001020 78 2f 5f 30 2e 73 69 2f 68 6f 6d 65 2f 64 61 69 |x/_0.si/home/dai|
00001030 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 73 65 67 6d |/docs/index/segm|
00001040 65 6e 74 73 5f 31 2f 68 6f 6d 65 2f 64 61 69 2f |ents_1/home/dai/|
00001050 64 6f 63 73 2f 69 6e 64 65 78 2f 77 72 69 74 65 |docs/index/write|
00001060 2e 6c 6f 63 6b 0e 16 16 1b 1b 1a 1f 1f 01 0d 09 |.lock...........|
00001070 e4 01 06 17 11 0b 11 0b 05 c0 28 93 e8 00 00 00 |..........(.....|
00001080 00 00 00 00 00 26 7d 6b cb 00 00 00 00 00 00 00 |.....&}k........|
00001090 3f d7 6c 17 16 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
000010a0 6c 64 73 49 6e 64 65 78 49 64 78 00 00 00 00 6b |ldsIndexIdx....k|
000010b0 f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 |.fV..[...:2MK...|
000010c0 c0 28 93 e8 00 00 00 00 00 00 00 00 be 7c 21 a1 |.(...........|!.|
000010d0 c0 28 93 e8 00 00 00 00 00 00 00 00 15 f4 63 e8 |.(............c.|
000010e0

Contents read with gdb:
(gdb) x/32xb 140063879776283
0x7f6329ccc81b: 0x08 0x44 0x0e 0x00 0x21 0x29 0x04 0xc0
0x7f6329ccc823: 0x28 0x93 0xe8 0x00 0x00 0x00 0x00 0x00
0x7f6329ccc82b: 0x00 0x00 0x00 0x43 0xab 0x9e 0x6c 0x00
0x7f6329ccc833: 0x00 0x00 0x00 0x00 0x00 0x3f 0xd7 0x6c


Source code analysis

DirectByteBufferR inheritance hierarchy

[Figure: DirectByteBufferR inheritance diagram]

The nio class DirectByteBufferR is generated automatically for each platform by the following build script:

# source: jdk/make/modules/java.base/gensrc/GensrcBuffer.gmk
# Direct byte buffer
#
DIRECT_X_BUF := Direct-X-Buffer

$(eval $(call SetupGenBuffer,DirectByteBuffer, $(DIRECT_X_BUF), type:=byte, BIN:=1))
$(eval $(call SetupGenBuffer,DirectByteBufferR,$(DIRECT_X_BUF), type:=byte, BIN:=1, RW:=R))
DirectByteBufferR extends DirectByteBuffer, which in turn extends ByteBuffer.
Below is the DirectByteBufferR class built by those macros. It only exists after you build the JDK (a plain source checkout does not contain it); on my machine it is generated at:
jdk/build/linux-x86_64-server-slowdebug/support/gensrc/java.base/java/nio/DirectByteBufferR.java



// -- This file was mechanically generated: Do not edit! -- //

package java.nio;

import java.io.FileDescriptor;
import java.lang.ref.Reference;
import java.util.Objects;
import jdk.internal.access.foreign.MemorySegmentProxy;
import jdk.internal.misc.ScopedMemoryAccess.Scope;
import jdk.internal.misc.VM;
import jdk.internal.ref.Cleaner;
import sun.nio.ch.DirectBuffer;


class DirectByteBufferR extends DirectByteBuffer implements DirectBuffer
{
...
// Primary constructor
//
DirectByteBufferR(int cap) { // package-private
super(cap);
this.isReadOnly = true;

}



// For memory-mapped buffers -- invoked by FileChannelImpl via reflection
//
protected DirectByteBufferR(int cap, long addr,
FileDescriptor fd,
Runnable unmapper,
boolean isSync, MemorySegmentProxy segment)
{
super(cap, addr, fd, unmapper, isSync, segment);
this.isReadOnly = true;
}

...

}

Its read method, DirectByteBufferR.get, is inherited from DirectByteBuffer. It actually ends up calling SCOPED_MEMORY_ACCESS.getByte; here is the implementation:
// jdk/build/linux-x86_64-server-slowdebug/support/gensrc/java.base/java/nio/DirectByteBuffer.java
public byte get() {
    try {
        return ((SCOPED_MEMORY_ACCESS.getByte(scope(), null, ix(nextGetIndex()))));
    } finally {
        Reference.reachabilityFence(this);
    }
}

public byte get(int i) {
    try {
        return ((SCOPED_MEMORY_ACCESS.getByte(scope(), null, ix(checkIndex(i)))));
    } finally {
        Reference.reachabilityFence(this);
    }
}

SCOPED_MEMORY_ACCESS is defined in MappedByteBuffer, and DirectByteBuffer is a subclass of MappedByteBuffer:
class DirectByteBuffer  extends MappedByteBuffer implements DirectBuffer
{
...
}
SCOPED_MEMORY_ACCESS.getByte in turn calls UNSAFE.getByte:

public class ScopedMemoryAccess {

    private static final Unsafe UNSAFE = Unsafe.getUnsafe();

    @ForceInline
    public byte getByte(Scope scope, Object base, long offset) {
        ...
        return getByteInternal(scope, base, offset); // delegates to the internal method
        ...
    }

    @ForceInline @Scoped
    private byte getByteInternal(Scope scope, Object base, long offset) {
        ...
        return UNSAFE.getByte(base, offset); // ultimately calls UNSAFE.getByte
        ...
    }
}
UNSAFE is a global static field; the call finally lands in this HotSpot code:
// jdk/src/hotspot/share/prims/unsafe.cpp

UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
return MemoryAccess<java_type>(thread, obj, offset).get(); \
} UNSAFE_END \

After macro expansion this calls MemoryAccess's get method, which actually reads the value from memory.
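To connect this back to the beginning: when a file is mapped read-only through FileChannel.map, the buffer you get back is one of these generated DirectByteBufferR instances, and every get() walks the ScopedMemoryAccess/Unsafe path above. A small standalone sketch of mine (the path is just the example index file from the hex dump; any readable, non-empty file works):

import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapReadDemo {
    public static void main(String[] args) throws Exception {
        Path p = Path.of("/home/dai/docs/index/segments_1");  // example path, replace with any file
        try (FileChannel ch = FileChannel.open(p, StandardOpenOption.READ)) {
            // a READ_ONLY mapping is backed by the generated DirectByteBufferR
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            System.out.println(buf.getClass());      // class java.nio.DirectByteBufferR
            System.out.println(buf.get(0) & 0xff);   // reads through ScopedMemoryAccess / Unsafe
        }
    }
}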

diff --git "a/2023/04/12/arroyo-\347\274\226\350\257\221\345\222\214\344\275\277\347\224\250/index.html" "b/2023/04/12/arroyo-\347\274\226\350\257\221\345\222\214\344\275\277\347\224\250/index.html"
new file mode 100644

arroyo build and usage

Background

GitHub repository
Arroyo is a distributed streaming engine written in Rust. I wanted to try it out, so this post documents the setup.

Build

  • Install Rust

    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

  • Pull the code

    git clone https://github.com/ArroyoSystems/arroyo.git

  • Install PostgreSQL

    sudo apt install postgresql

Configuration file path:

/etc/postgresql/14/main/pg_hba.conf
## connect to postgresql
sudo -u postgres psql
## create the arroyo database
create database arroyo;
## run the initial migration from this path (inside psql)
\i arroyo/arroyo-api/migrations/V1__initial.sql

## create the postgresql user
create user arroyo with password 'arroyo';

## restart postgresql
sudo systemctl restart postgresql.service
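Depending on how you point Arroyo at the database, you may also need to give the new user access to it. This grant is my own assumed step, not something from the Arroyo docs:

## run inside psql as the postgres superuser (assumed extra step)
grant all privileges on database arroyo to arroyo;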
  • Build

    ## switch into the repo
    cd arroyo
    ## build
    cargo build
    ## if the build fails, try
    cargo build --no-default-features

    Build output:
    Compiling datafusion-optimizer v20.0.0
    Compiling datafusion v20.0.0
    Compiling arroyo-sql v0.1.0 (/home/dai/rust/arroyo/arroyo-sql)
    Compiling arroyo-sql-macro v0.1.0 (/home/dai/rust/arroyo/arroyo-sql-macro)
    Compiling arroyo-sql-testing v0.1.0 (/home/dai/rust/arroyo/arroyo-sql-testing)
    Finished dev [unoptimized + debuginfo] target(s) in 2m 13s


Errors and fixes

The build failed because the SSL library was missing. My system is Ubuntu, so I ran sudo apt install libssl-dev; if it still does not build, try cargo build --no-default-features.
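In other words, the fix looks like this (pkg-config is my addition; the Rust openssl crate often needs it, but treat it as an assumption):

## install the missing SSL headers on Ubuntu
sudo apt install libssl-dev pkg-config
## then retry the build
cargo build --no-default-features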

diff --git "a/2023/04/14/java-rabbitmq-\345\210\235\345\247\213\345\214\226/index.html" "b/2023/04/14/java-rabbitmq-\345\210\235\345\247\213\345\214\226/index.html"
new file mode 100644

java rabbitmq initialization

Background

Understand the RabbitMQ startup flow in a Java Spring Boot application.

Stack trace

declareQueues:700, RabbitAdmin (org.springframework.amqp.rabbit.core)
lambda$initialize$12:606, RabbitAdmin (org.springframework.amqp.rabbit.core)
doInRabbit:-1, 1826902085 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$965)
invokeAction:2151, RabbitTemplate (org.springframework.amqp.rabbit.core)
doExecute:2110, RabbitTemplate (org.springframework.amqp.rabbit.core)
execute:2062, RabbitTemplate (org.springframework.amqp.rabbit.core)
execute:2042, RabbitTemplate (org.springframework.amqp.rabbit.core)
initialize:604, RabbitAdmin (org.springframework.amqp.rabbit.core)
lambda$null$10:532, RabbitAdmin (org.springframework.amqp.rabbit.core)
doWithRetry:-1, 999782961 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$957)
doExecute:287, RetryTemplate (org.springframework.retry.support)
execute:164, RetryTemplate (org.springframework.retry.support)
lambda$afterPropertiesSet$11:531, RabbitAdmin (org.springframework.amqp.rabbit.core)
onCreate:-1, 1185831500 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$950)
lambda$onCreate$0:38, CompositeConnectionListener (org.springframework.amqp.rabbit.connection)
accept:-1, 1588281004 (org.springframework.amqp.rabbit.connection.CompositeConnectionListener$$Lambda$956)
forEach:803, CopyOnWriteArrayList (java.util.concurrent)
onCreate:38, CompositeConnectionListener (org.springframework.amqp.rabbit.connection)
createConnection:757, CachingConnectionFactory (org.springframework.amqp.rabbit.connection)
createConnection:216, ConnectionFactoryUtils (org.springframework.amqp.rabbit.connection)
doExecute:2089, RabbitTemplate (org.springframework.amqp.rabbit.core)
execute:2062, RabbitTemplate (org.springframework.amqp.rabbit.core)
execute:2042, RabbitTemplate (org.springframework.amqp.rabbit.core)
declareExchange:221, RabbitAdmin (org.springframework.amqp.rabbit.core)
cdpOrderTopicExchange:27, RabbitConfig (com.patpat.mms.mdp.base.core.rest.config)
CGLIB$cdpOrderTopicExchange$7:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353 (com.patpat.mms.mdp.base.core.rest.config)
invoke:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353$$FastClassBySpringCGLIB$$bdc910b3 (com.patpat.mms.mdp.base.core.rest.config)
invokeSuper:244, MethodProxy (org.springframework.cglib.proxy)
intercept:331, ConfigurationClassEnhancer$BeanMethodInterceptor (org.springframework.context.annotation)
cdpOrderTopicExchange:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353 (com.patpat.mms.mdp.base.core.rest.config)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
getObject:-1, 1735872041 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$295)
getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
refresh:551, AbstractApplicationContext (org.springframework.context.support)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
prepareTestInstance:244, TestContextManager (org.springframework.test.context)
createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
main:54, JUnitStarter (com.intellij.rt.junit)
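The frames `cdpOrderTopicExchange:27, RabbitConfig` and `declareExchange:221, RabbitAdmin` show the exchange bean being declared while the Spring context refreshes and the first connection is created. A minimal sketch of such a configuration class (the class name comes from the stack; the exchange name and body are assumptions, not the project's real code):

import org.springframework.amqp.core.TopicExchange;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RabbitConfig {

    // Declaring the exchange as a bean is enough for RabbitAdmin to create it
    // on the broker when the first connection is opened (see the stack above).
    @Bean
    public TopicExchange cdpOrderTopicExchange() {
        // name, durable, autoDelete -- the name here is a placeholder
        return new TopicExchange("cdp.order.topic", true, false);
    }
}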
diff --git "a/2023/04/14/kafka\347\274\226\350\257\221\345\222\214\345\220\257\345\212\250-1/index.html" "b/2023/04/14/kafka\347\274\226\350\257\221\345\222\214\345\220\257\345\212\250-1/index.html"
new file mode 100644

kafka build and startup

Background

Get familiar with using Kafka.

Build

## pull the code
git clone https://github.com/apache/kafka.git
## switch into the repo
cd kafka/
## build and package
./gradlew jar
After building, start ZooKeeper and then Kafka:

## start zookeeper in one terminal
bin/zookeeper-server-start.sh config/zookeeper.properties

## start kafka in another terminal
bin/kafka-server-start.sh config/server.properties
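Once both processes are up, a quick smoke test with the bundled CLI tools confirms the broker works (the topic name is arbitrary; 9092 is the default listener port):

## create a topic
bin/kafka-topics.sh --create --topic quickstart --bootstrap-server localhost:9092
## produce a few messages (type lines, then Ctrl-C)
bin/kafka-console-producer.sh --topic quickstart --bootstrap-server localhost:9092
## read them back
bin/kafka-console-consumer.sh --topic quickstart --from-beginning --bootstrap-server localhost:9092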
diff --git a/2023/04/18/java-integer-divison/index.html b/2023/04/18/java-integer-divison/index.html
new file mode 100644

java integer division

Background

Understand the rules of Java integer division.

JLS description
Integer division rounds toward 0. That is, the quotient produced for operands n
and d that are integers after binary numeric promotion (§5.6) is an integer value q
whose magnitude is as large as possible while satisfying |d ⋅ q| ≤ |n|. Moreover, q
is positive when |n| ≥ |d| and n and d have the same sign, but q is negative when
|n| ≥ |d| and n and d have opposite signs.
In other words:

int res = 3 / 5;   // res == 0
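A small example of my own showing what "rounds toward 0" means in practice, especially for negative operands:

public class IntDivision {
    public static void main(String[] args) {
        System.out.println(3 / 5);    // 0
        System.out.println(7 / 2);    // 3
        System.out.println(-7 / 2);   // -3  (rounded toward zero, not toward negative infinity)
        System.out.println(7 / -2);   // -3
        System.out.println(-7 % 2);   // -1  (the remainder keeps the dividend's sign)
    }
}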

The bytecode that is ultimately used is idiv.

Let's see how the idiv bytecode is implemented:

JVM implementation
void Assembler::idivl(Register src) {
int encode = prefix_and_encode(src->encoding());
emit_int16((unsigned char)0xF7, (0xF8 | encode));
}

void Assembler::divl(Register src) { // Unsigned
int encode = prefix_and_encode(src->encoding());
emit_int16((unsigned char)0xF7, (0xF0 | encode));
}
+ +

Looking at the Intel manual:
2.1.5 Addressing-Mode Encoding of ModR/M and SIB Bytes
The values and corresponding addressing forms of the ModR/M and SIB bytes are shown in Table 2-1 through Table
2-3: 16-bit addressing forms specified by the ModR/M byte are in Table 2-1 and 32-bit addressing forms are in
Table 2-2. Table 2-3 shows 32-bit addressing forms specified by the SIB byte. In cases where the reg/opcode field
in the ModR/M byte represents an extended opcode, valid encodings are shown in Appendix B.
In Table 2-1 and Table 2-2, the Effective Address column lists 32 effective addresses that can be assigned to the
first operand of an instruction by using the Mod and R/M fields of the ModR/M byte. The first 24 options provide
ways of specifying a memory location; the last eight (Mod = 11B) provide ways of specifying general-purpose, MMX
technology and XMM registers.
The Mod and R/M columns in Table 2-1 and Table 2-2 give the binary encodings of the Mod and R/M fields required
to obtain the effective address listed in the first column. For example: see the row indicated by Mod = 11B, R/M =
000B. The row identifies the general-purpose registers EAX, AX or AL; MMX technology register MM0; or XMM
register XMM0. The register used is determined by the opcode byte and the operand-size attribute.
Now look at the seventh row in either table (labeled “REG =”). This row specifies the use of the 3-bit Reg/Opcode
field when the field is used to give the location of a second operand. The second operand must be a generalpurpose, MMX technology, or XMM register. Rows one through five list the registers that may correspond to the
value in the table. Again, the register used is determined by the opcode byte along with the operand-size attribute.
If the instruction does not require a second operand, then the Reg/Opcode field may be used as an opcode extension. This use is represented by the sixth row in the tables (labeled “/digit (Opcode)”). Note that values in row six
are represented in decimal form.
The body of Table 2-1 and Table 2-2 (under the label “Value of ModR/M Byte (in Hexadecimal)”) contains a 32 by
8 array that presents all of 256 values of the ModR/M byte (in hexadecimal). Bits 3, 4 and 5 are specified by the
column of the table in which a byte resides. The row specifies bits 0, 1 and 2; and bits 6 and 7. The figure below
demonstrates interpretation of one table value.
[Figure: ModR/M register encoding table]
Registers are encoded from 0 through 15 (16 registers in total), in the order shown above.

Example
int main(void){
    int num0 = 5;
    int num1 = 2;
    int num = num0 / num1;
    return num;
}
## compile
gcc -O0 test.c -o test

## dump the assembly with objdump
objdump -d test | grep -A20 '<main>:'
The result is:
0000000000001129 <main>:
1129: f3 0f 1e fa endbr64
112d: 55 push %rbp
112e: 48 89 e5 mov %rsp,%rbp
1131: c7 45 f4 05 00 00 00 movl $0x5,-0xc(%rbp)
1138: c7 45 f8 02 00 00 00 movl $0x2,-0x8(%rbp)
113f: 8b 45 f4 mov -0xc(%rbp),%eax
1142: 99 cltd
1143: f7 7d f8 idivl -0x8(%rbp)
1146: 89 45 fc mov %eax,-0x4(%rbp)
1149: 8b 45 fc mov -0x4(%rbp),%eax
114c: 5d pop %rbp
114d: c3 retq
114e: 66 90 xchg %ax,%ax
diff --git "a/2023/04/20/volatile-java-\345\256\236\347\216\260/index.html" "b/2023/04/20/volatile-java-\345\256\236\347\216\260/index.html"
new file mode 100644

volatile java implementation

Background

Understand how Java volatile is implemented.

Implementation

The core is here: is_volatile_shift
void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
transition(vtos, vtos);

const Register cache = rcx;
const Register index = rdx;
const Register obj = rcx;
const Register off = rbx;
const Register flags = rax;

resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
jvmti_post_field_mod(cache, index, is_static);
load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

Label notVolatile, Done;
__ movl(rdx, flags);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); // shift the volatile flag into the low bit; if the field is volatile we do NOT take the notVolatile branch below
__ andl(rdx, 0x1);

// Check for volatile store
__ testl(rdx, rdx);
__ jcc(Assembler::zero, notVolatile);

putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
Assembler::StoreStore));
__ jmp(Done);
__ bind(notVolatile);

putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);

__ bind(Done);
}
+ +

Here is the concrete code:
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
// writes act as aquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
// happen after the read float up to before the read. It's OK for
// non-volatile memory refs that happen before the volatile read to
// float down below it.
// (3) Similar a volatile write cannot let unrelated NON-volatile
// memory refs that happen BEFORE the write float down to after the
// write. It's OK for non-volatile memory refs that happen after the
// volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.

void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
// Helper function to insert a is-volatile test and memory barrier
__ membar(order_constraint);
}
+ +
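For reference, a tiny Java sketch of mine that exercises this path: the write to a volatile static field is a putstatic whose volatile bit makes the template interpreter take the volatile branch above and emit the StoreLoad|StoreStore barrier after the store.

public class VolatileFlag {
    // the 'v' bit described below ends up set for this field
    private static volatile boolean ready;

    public static void main(String[] args) {
        ready = true;              // volatile putstatic -> memory barrier after the store
        System.out.println(ready); // volatile getstatic
    }
}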

There is also this part:

The ConstantPoolCache looks like data read from the .class file at runtime, and this is where the volatile flag bit lives. Let's see where this value, ConstantPoolCacheEntry::is_volatile_shift, is set.
// The ConstantPoolCache is not a cache! It is the resolution table that the
// interpreter uses to avoid going into the runtime and a way to access resolved
// values.

// A ConstantPoolCacheEntry describes an individual entry of the constant
// pool cache. There's 2 principal kinds of entries: field entries for in-
// stance & static field access, and method entries for invokes. Some of
// the entry layout is shared and looks as follows:
//
// bit number |31 0|
// bit length |-8--|-8--|---16----|
// --------------------------------
// _indices [ b2 | b1 | index ] index = constant_pool_index
// _f1 [ entry specific ] metadata ptr (method or klass)
// _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr
// _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |1 |-3-|----16-----]
// _flags [tos|0|F=0|S|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]

// --------------------------------
//
// with:
// index = original constant pool index
// b1 = bytecode 1
// b2 = bytecode 2
// psize = parameters size (method entries only)
// field_index = index into field information in holder InstanceKlass
// The index max is 0xffff (max number of fields in constant pool)
// and is multiplied by (InstanceKlass::next_offset) when accessing.
// tos = TosState
// F = the entry is for a field (or F=0 for a method)
// A = call site has an appendix argument (loaded from resolved references)
// I = interface call is forced virtual (must use a vtable index or vfinal)
// f = field or method is final
// v = field is volatile
// vf = virtual but final (method entries only: is_vfinal())
// indy_rf = call site specifier method resolution failed
//
// The flags after TosState have the following interpretation:
// bit 27: 0 for fields, 1 for methods
// f flag true if field is marked final
// v flag true if field is volatile (only for fields)
// f2 flag true if f2 contains an oop (e.g., virtual final method)
// fv flag true if invokeinterface used for method in class Object
//
// The flags 31, 30, 29, 28 together build a 4 bit number 0 to 16 with the
// following mapping to the TosState states:
//
// btos: 0
// ztos: 1
// ctos: 2
// stos: 3
// itos: 4
// ltos: 5
// ftos: 6
// dtos: 7
// atos: 8
// vtos: 9
//
// Entry specific: field entries:
// _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
// _f1 = field holder (as a java.lang.Class, not a Klass*)
// _f2 = field offset in bytes
// _flags = field type information, original FieldInfo index in field holder
// (field_index section)
//
// Entry specific: method entries:
// _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
// original constant pool index
// _f1 = Method* for non-virtual calls, unused by virtual calls.
// for interface calls, which are essentially virtual but need a klass,
// contains Klass* for the corresponding interface.
// for invokedynamic and invokehandle, f1 contains the adapter method which
// manages the actual call. The appendix is stored in the ConstantPool
// resolved_references array.
// (upcoming metadata changes will move the appendix to a separate array)
// _f2 = vtable/itable index (or final Method*) for virtual calls only,
// unused by non-virtual. The is_vfinal flag indicates this is a
// method pointer for a final method, not an index.
// _flags = has local signature (MHs and indy),
// virtual final bit (vfinal),
// parameter size (psize section)
//
// Note: invokevirtual & invokespecial bytecodes can share the same constant
// pool entry and thus the same constant pool cache entry. All invoke
// bytecodes but invokevirtual use only _f1 and the corresponding b1
// bytecode, while invokevirtual uses only _f2 and the corresponding
// b2 bytecode. The value of _flags is shared for both types of entries.
//
// The fields are volatile so that they are stored in the order written in the
// source code. The _indices field with the bytecode must be written last.

class CallInfo;

class ConstantPoolCacheEntry {
+ + +

Now let's look at where this value is set:
// source: src/hotspot/share/oops/cpCache.cpp

// Note that concurrent update of both bytecodes can leave one of them
// reset to zero. This is harmless; the interpreter will simply re-resolve
// the damaged entry. More seriously, the memory synchronization is needed
// to flush other fields (f1, f2) completely to memory before the bytecodes
// are updated, lest other processors see a non-zero bytecode but zero f1/f2.
void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
Bytecodes::Code put_code,
Klass* field_holder,
int field_index,
int field_offset,
TosState field_type,
bool is_final,
bool is_volatile) {
set_f1(field_holder);
set_f2(field_offset);
assert((field_index & field_index_mask) == field_index,
"field index does not fit in low flag bits");
set_field_flags(field_type,
((is_volatile ? 1 : 0) << is_volatile_shift) |   // <-- the volatile flag is set here
((is_final ? 1 : 0) << is_final_shift),
field_index);
set_bytecode_1(get_code);
set_bytecode_2(put_code);
NOT_PRODUCT(verify(tty));
}
+ + +

Then let's check it with gdb:
(gdb) bt
#0 b ::set_field (this=0x7fffb4151b50, get_code=Bytecodes::_nop, put_code=Bytecodes::_nop, field_holder=0x800042fa0, field_index=2, field_offset=120, field_type=atos,
is_final=true, is_volatile=false) at /home/dai/jdk/src/hotspot/share/oops/cpCache.cpp:139
#1 0x00007ffff65d2561 in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:708
#2 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
#3 0x00007fffe10203e3 in ?? ()
#4 0x00007ffff7bcc0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#5 0x00007fffe1020362 in ?? ()
#6 0x0000000000000000 in ?? ()
+ +

Let's see where the ConstantPoolCacheEntry is fetched from:
// Helper class to access current interpreter state
class LastFrameAccessor : public StackObj {
frame _last_frame;
public:
...
ConstantPoolCacheEntry* cache_entry_at(int i) const
{ return method()->constants()->cache()->entry_at(i); }
ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
}
(gdb) p m->print()
{method}
- this oop: 0x00007fffd14108e8
- method holder: 'java/lang/String'
- constants: 0x00007fffd14013b8 constant pool [1396]/operands[28] {0x00007fffd14013b8} for 'java/lang/String' cache=0x00007fffd1544158
- access: 0x8 static
- name: '<clinit>'
- signature: '()V'
- max stack: 3
- max locals: 0
- size of params: 0
- method size: 13
- vtable index: -2
- i2i entry: 0x00007fffe100dbe0
- adapters: AHE@0x00007ffff009b550: 0x i2c: 0x00007fffe1115060 c2i: 0x00007fffe111510d c2iUV: 0x00007fffe11150e0 c2iNCI: 0x00007fffe111514a
- compiled entry 0x00007fffe111510d
- code size: 22
- code start: 0x00007fffd14108c0
- code end (excl): 0x00007fffd14108d6
- checked ex length: 0
- linenumber start: 0x00007fffd14108d6
- localvar length: 0
$7 = void
(gdb) bt
#0 frame::interpreter_frame_method (this=0x7ffff5aa43f8) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/frame.cpp:332
#1 0x00007ffff672253c in LastFrameAccessor::method (this=0x7ffff5aa43f0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:90
#2 0x00007ffff671d2fb in InterpreterRuntime::resolve_get_put (current=0x7ffff0028940, bytecode=Bytecodes::_putstatic) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:650
#3 0x00007ffff671ec81 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028940, bytecode=Bytecodes::_putstatic) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:948
#4 0x00007fffe10203a3 in ?? ()
#5 0x00007ffff7d4b280 in TemplateInterpreter::_active_table () from /home/ubuntu/daixiao/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#6 0x00007fffe1020322 in ?? ()
#7 0x0000000000000001 in ?? ()
#8 0x00007ffff5aa44d8 in ?? ()
#9 0x00007fffd14108c1 in ?? ()
#10 0x00007ffff5aa4528 in ?? ()
#11 0x00007fffd1544158 in ?? ()
#12 0x0000000000000000 in ?? ()
(gdb)
+ + +

Resolving the cache

InterpreterRuntime::resolve_from_cache

LinkResolver::resolve_field_access
+ +

The related class lives here:

//  src/hotspot/share/oops/cpCache.cpp
//  header: src/hotspot/share/oops/cpCache.hpp
ConstantPoolCacheEntry
+ +

I asked ChatGPT:

In the HotSpot JVM, ConstantPoolCacheEntry objects are created dynamically while a class file is parsed. Specifically, when parsing a class file the JVM creates a corresponding ConstantPoolCacheEntry for each constant and adds it to the class's constant pool cache.

The instantiation of ConstantPoolCacheEntry objects is mainly done in ClassFileParser's parse_constant_pool_entry() function. While parsing the constant pool, parse_constant_pool_entry() dynamically creates a ConstantPoolCacheEntry based on the constant's type and value and adds it to the class's constant pool cache.

Note that during parsing the JVM may use placeholder objects to represent invalid or unknown constants rather than creating ConstantPoolCacheEntry objects. In that case the placeholders are added to the constant pool cache so that invalid or unknown constants can be skipped quickly during resolution and execution.

In short, in the HotSpot JVM, ConstantPoolCacheEntry objects are instantiated while the class file is parsed, and the concrete work is done in ClassFileParser's parse_constant_pool_entry() function.

Looking at the stack

(gdb) p stream->_source
$4 = 0x7ffff00bdd40 "/home/ubuntu/daixiao/jdk/build/linux-x86_64-server-slowdebug/jdk/modules/java.base"
(gdb) bt
#0 ClassFileParser::parse_constant_pool_entries (this=0x7ffff0028940, stream=0x0, cp=0x7ffff5fadff1 <Thread::as_Java_thread()+39>, length=32767, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:150
#1 0x00007ffff6378b5e in ClassFileParser::parse_constant_pool (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, cp=0x7fffd169b1c8, length=31, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:424
#2 0x00007ffff638aacc in ClassFileParser::parse_stream (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:5720
#3 0x00007ffff638a2b1 in ClassFileParser::ClassFileParser (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, name=0x7fffb005b0a0, loader_data=0x7ffff0091890, cl_info=0x7ffff5aa2a10, pub_level=ClassFileParser::BROADCAST, __the_thread__=0x7ffff0028940)
at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:5590
#4 0x00007ffff69d5f1f in KlassFactory::create_from_stream (stream=0x7ffff00298e0, name=0x7fffb005b0a0, loader_data=0x7ffff0091890, cl_info=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/klassFactory.cpp:199
#5 0x00007ffff639a9e5 in ClassLoader::load_class (name=0x7fffb005b0a0, search_append_only=false, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classLoader.cpp:1222
#6 0x00007ffff6e97300 in SystemDictionary::load_instance_class_impl (class_name=0x7fffb005b0a0, class_loader=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1290
#7 0x00007ffff6e976d1 in SystemDictionary::load_instance_class (name_hash=1923324215, name=0x7fffb005b0a0, class_loader=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1356
#8 0x00007ffff6e95874 in SystemDictionary::resolve_instance_class_or_null (name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:724
#9 0x00007ffff6e94481 in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:295
#10 0x00007ffff6e94330 in SystemDictionary::resolve_or_null (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:278
#11 0x00007ffff6e94273 in SystemDictionary::resolve_or_fail (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:264
#12 0x00007ffff64314d1 in ConstantPool::klass_at_impl (this_cp=..., which=506, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/oops/constantPool.cpp:512
#13 0x00007ffff62aca88 in ConstantPool::klass_at (this=0x7fffd1692c38, which=506, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/oops/constantPool.hpp:420
#14 0x00007ffff671af3a in InterpreterRuntime::_new (current=0x7ffff0028940, pool=0x7fffd1692c38, index=506) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:219
#15 0x00007fffe1023b92 in ?? ()
#16 0x00007fffe1023b06 in ?? ()
#17 0x00007ffff5aa3368 in ?? ()
#18 0x00007fffd1699fe1 in ?? ()
#19 0x00007ffff5aa33b8 in ?? ()
#20 0x00007fffd169a0a0 in ?? ()
#21 0x0000000000000000 in ?? ()
+ + +

Setting a breakpoint

(gdb) commands
Type commands for breakpoint(s) 5, one per line.
End with a line saying just "end".
>p name->print()
>c
>end
Output:
Thread 2 "java" hit Breakpoint 5, KlassFactory::create_from_stream (stream=0x7ffff5aa2f20, name=0x7ffff044c4a0, loader_data=0x7ffff03ddf90, cl_info=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/klassFactory.cpp:172
172 assert(loader_data != NULL, "invariant");
Symbol: 'com/HelloWorld' count 2$1035 = void
+ + +

Class loading

$801 = 0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/"
(gdb) bt
#0 KlassFactory::create_from_stream (stream=0x7ffff59fce00, name=0x7ffff057a9d0, loader_data=0x7ffff05185c0, cl_info=..., __the_thread__=0x7ffff0028f50)
at /home/dai/jdk/src/hotspot/share/classfile/klassFactory.cpp:179
#1 0x00007ffff6d4248e in SystemDictionary::resolve_class_from_stream (st=0x7ffff59fce00, class_name=0x7ffff057a9d0, class_loader=..., cl_info=..., __the_thread__=0x7ffff0028f50)
at /home/dai/jdk/src/hotspot/share/classfile/systemDictionary.cpp:914
#2 0x00007ffff6d42708 in SystemDictionary::resolve_from_stream (st=0x7ffff59fce00, class_name=0x7ffff057a9d0, class_loader=..., cl_info=..., __the_thread__=0x7ffff0028f50)
at /home/dai/jdk/src/hotspot/share/classfile/systemDictionary.cpp:952
#3 0x00007ffff66e1b51 in jvm_define_class_common (name=0x7ffff59fd030 "Hello", loader=0x7ffff59fd5a0, buf=0x7ffff04a4b90 "\312\376\272\276", len=409, pd=0x7ffff59fd578,
source=0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/", __the_thread__=0x7ffff0028f50) at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:883
#4 0x00007ffff66e2832 in JVM_DefineClassWithSource (env=0x7ffff0029230, name=0x7ffff59fd030 "Hello", loader=0x7ffff59fd5a0, buf=0x7ffff04a4b90 "\312\376\272\276", len=409, pd=0x7ffff59fd578,
source=0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/") at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:1047
#5 0x00007ffff58db1f5 in Java_java_lang_ClassLoader_defineClass1 (env=0x7ffff0029230, cls=0x7ffff59fd560, loader=0x7ffff59fd5a0, name=0x7ffff59fd598, data=0x7ffff59fd590, offset=0, length=409,
pd=0x7ffff59fd578, source=0x7ffff59fd570) at /home/dai/jdk/src/java.base/share/native/libjava/ClassLoader.c:132
#6 0x00007fffe100f6cb in ?? ()
#7 0x0000000000000199 in ?? ()
#8 0x00007ffff59fd578 in ?? ()
#9 0x00007ffff59fd570 in ?? ()
#10 0x0000555555581230 in ?? ()
#11 0x00007ffff0028f50 in ?? ()
#12 0x00007fffb445ba08 in ?? ()
#13 0x00007fffe100f199 in ?? ()
#14 0x00007ffff59fd508 in ?? ()
#15 0x00007fffb4025170 in ?? ()
#16 0x00007ffff59fd5a0 in ?? ()
#17 0x00007fffb4147588 in ?? ()
#18 0x0000000000000000 in ?? ()
+ + +

How the field info is initialized

The ConstantPoolCacheEntry is populated from the fieldDescriptor's fields, so let's look at how the fieldDescriptor is initialized.
1
2
3
4
5
6
7
8
9
10
11
  fieldDescriptor info;
...
void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code bytecode) {
...

LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
m, bytecode, CHECK);
...


}
+ + +

In the end we find the place where it is initialized:
bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
Symbol* f_name = fs.name();
Symbol* f_sig = fs.signature();
if (f_name == name && f_sig == sig) {
fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index()); // this is where the fieldDescriptor is initialized
return true;
}
}
return false;
}
+ +

The initialization happens right here, in fieldDescriptor::reinitialize:
void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
...
FieldInfo* f = ik->field(index);
_access_flags = accessFlags_from(f->access_flags());
guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
_index = index;
...
}
+ + +

JVM stack frame structure

inline frame ContinuationEntry::to_frame() const {
static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
assert(cb != nullptr, "");
assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb);
}
(gdb) bt
#0 frame::frame (this=0x7ffff59fe348) at /home/dai/jdk/src/hotspot/cpu/x86/frame_x86.inline.hpp:37
#1 0x00007ffff65d76f1 in LastFrameAccessor::LastFrameAccessor (this=0x7ffff59fe340, current=0x7ffff0028f70)
at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:84
#2 0x00007ffff65d212e in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70,
bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:652
#3 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70,
bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
#4 0x00007fffe10203e3 in ?? ()
#5 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table ()
from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#6 0x00007fffe1020362 in ?? ()
#7 0x0000000000000001 in ?? ()
#8 0x00007ffff59fe438 in ?? ()
#9 0x00007fffb4010821 in ?? ()
#10 0x00007ffff59fe488 in ?? ()
#11 0x00007fffb4149b38 in ?? ()
#12 0x0000000000000000 in ?? ()
+ +

Further on we can see that the initialization happens here:
frame JavaThread::pd_last_frame() {
assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
}
+ +

frame initialization

(gdb) bt
#0 JavaThread::pd_last_frame (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp:30
#1 0x00007ffff612888b in JavaThread::last_frame (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/runtime/thread.hpp:1407
#2 0x00007ffff65d7757 in LastFrameAccessor::LastFrameAccessor (this=0x7ffff59fe340, current=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:86
#3 0x00007ffff65d212e in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:652
#4 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
#5 0x00007fffe10203e3 in ?? ()
#6 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
#7 0x00007fffe1020362 in ?? ()
#8 0x0000000000000001 in ?? ()
#9 0x00007ffff59fe438 in ?? ()
#10 0x00007fffb4010821 in ?? ()
#11 0x00007ffff59fe488 in ?? ()
#12 0x00007fffb4149b38 in ?? ()
#13 0x0000000000000000 in ?? ()
(gdb) list
35 // For Forte Analyzer AsyncGetCallTrace profiling support - thread is
36 // currently interrupted by SIGPROF
37 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
38 void* ucontext, bool isInJava) {
39
40 assert(Thread::current() == this, "caller must be current thread");
41 return pd_get_top_frame(fr_addr, ucontext, isInJava);
42 }
43
44 bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
(gdb) info registers
rax 0x7ffff59fe220 140737314284064
rbx 0x7ffff59fe340 140737314284352
rcx 0x7ffff00153f0 140737220006896
rdx 0x7ffff0028f70 140737220087664
rsi 0x7ffff0028f70 140737220087664
rdi 0x7ffff59fe220 140737314284064
rbp 0x7ffff59fe1f0 0x7ffff59fe1f0
rsp 0x7ffff59fe1d0 0x7ffff59fe1d0
r8 0x8 8
r9 0x0 0
r10 0x7ffff7bca0a0 140737349722272
r11 0x7ffff0000090 140737219920016
r12 0x0 0
r13 0x7fffb4010821 140736213354529
r14 0x7ffff59fe488 140737314284680
r15 0x7ffff0028f70 140737220087664
rip 0x7ffff6da2ea3 0x7ffff6da2ea3 <JavaThread::pd_last_frame()+23>
eflags 0x202 [ IF ]
cs 0x33 51
ss 0x2b 43
ds 0x0 0
es 0x0 0
fs 0x0 0
gs 0x0 0

+ + +

Then we see this member:
JavaFrameAnchor _anchor;    
+ +

class JavaFrameAnchor {
...

private:
...
intptr_t* volatile _last_Java_sp; //stack_pointer
...
}
+ +

JavaFrameAnchor initialization

JavaFrameAnchor::clear (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp:44
44 _last_Java_fp = NULL;
(gdb) bt
#0 JavaFrameAnchor::clear (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp:44
#1 0x00007ffff65df910 in JavaFrameAnchor::JavaFrameAnchor (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/share/runtime/javaFrameAnchor.hpp:88
#2 0x00007ffff6d8730c in JavaThread::JavaThread (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:1076
#3 0x00007ffff6d8d0ee in Threads::create_vm (args=0x7ffff59fed50, canTryAgain=0x7ffff59fec5b) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2817
#4 0x00007ffff66b243b in JNI_CreateJavaVM_inner (vm=0x7ffff59feda8, penv=0x7ffff59fedb0, args=0x7ffff59fed50) at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3613
#5 0x00007ffff66b2787 in JNI_CreateJavaVM (vm=0x7ffff59feda8, penv=0x7ffff59fedb0, args=0x7ffff59fed50) at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3701
#6 0x00007ffff7faca6a in InitializeJVM (pvm=0x7ffff59feda8, penv=0x7ffff59fedb0, ifn=0x7ffff59fee00) at /home/dai/jdk/src/java.base/share/native/libjli/java.c:1459
#7 0x00007ffff7fa95ec in JavaMain (_args=0x7fffffffa9a0) at /home/dai/jdk/src/java.base/share/native/libjli/java.c:411
#8 0x00007ffff7fb05ec in ThreadJavaMain (args=0x7fffffffa9a0) at /home/dai/jdk/src/java.base/unix/native/libjli/java_md.c:651
#9 0x00007ffff7c94b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#10 0x00007ffff7d26a00 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81

+

last_java_sp is set here:
void MacroAssembler::call_VM_base(Register oop_result,
Register java_thread,
Register last_java_sp,
address entry_point,
int number_of_arguments,
bool check_exceptions) {
// determine java_thread register
if (!java_thread->is_valid()) {
#ifdef _LP64
java_thread = r15_thread;
#else
java_thread = rdi;
get_thread(java_thread);
#endif // LP64
}
// determine last_java_sp register
if (!last_java_sp->is_valid()) {
last_java_sp = rsp;
}
// debugging support
assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
#ifdef ASSERT
// TraceBytecodes does not use r12 but saves it over the call, so don't verify
// r12 is the heapbase.
LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
#endif // ASSERT

assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

// push java thread (becomes first argument of C function)

NOT_LP64(push(java_thread); number_of_arguments++);
LP64_ONLY(mov(c_rarg0, r15_thread));

// set last Java frame before call
assert(last_java_sp != rbp, "can't use ebp/rbp");

// Only interpreter should have to set fp
set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

// do the call, remove parameters
MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

// restore the thread (cannot use the pushed argument since arguments
// may be overwritten by C code generated by an optimizing compiler);
// however can use the register value directly if it is callee saved.
if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
// rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
guarantee(java_thread != rax, "change this code");
push(rax);
{ Label L;
get_thread(rax);
cmpptr(java_thread, rax);
jcc(Assembler::equal, L);
STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
bind(L);
}
pop(rax);
#endif
} else {
get_thread(java_thread);
}
// reset last Java frame
// Only interpreter should have to clear fp
reset_last_Java_frame(java_thread, true);

// C++ interp handles this in the interpreter
check_and_handle_popframe(java_thread);
check_and_handle_earlyret(java_thread);

if (check_exceptions) {
// check for pending exceptions (java_thread is set upon return)
cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
jump_cc(Assembler::notEqual,
RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
// This used to conditionally jump to forward_exception however it is
// possible if we relocate that the branch will not reach. So we must jump
// around so we can always reach

Label ok;
jcc(Assembler::equal, ok);
jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
bind(ok);
#endif // LP64
}

// get oop result if there is one and reset the value in the thread
if (oop_result->is_valid()) {
get_vm_result(oop_result, java_thread);
}
}
+

The volatile flag bit

// src/hotspot/share/utilities/accessFlags.hpp
bool is_volatile () const { return (_flags & JVM_ACC_VOLATILE ) != 0; }
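On the bytecode side you can see where this flag comes from with javap: a field declared volatile carries ACC_VOLATILE (0x0040) in its access_flags, which is what JVM_ACC_VOLATILE tests above. Using the small VolatileFlag sketch from earlier (output shown as comments, trimmed):

javac VolatileFlag.java
javap -v -p VolatileFlag
##   private static volatile boolean ready;
##     descriptor: Z
##     flags: (0x004a) ACC_PRIVATE, ACC_STATIC, ACC_VOLATILE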
diff --git "a/2023/05/04/insert-ignore-\346\255\273\351\224\201/index.html" "b/2023/05/04/insert-ignore-\346\255\273\351\224\201/index.html"
new file mode 100644

insert ignore deadlock

Background

An insert ignore statement deadlocked.

Investigation

Logs

Going through the deadlock log we narrowed it down to one table. Put simply, it has two relevant columns:

  • id: primary key
  • email_address: unique index

Two threads write to it concurrently, each doing batched insert ignore statements. At first glance the SQL is very simple; some googling later it turned out to be related to gap locks:
related blog
2023-05-03 05:06:45 0x4002719ffef0
*** (1) TRANSACTION:
TRANSACTION 218982374, ACTIVE 0 sec inserting
mysql tables in use 1, locked 1
LOCK WAIT 7 lock struct(s), heap size 1136, 4 row lock(s)
MySQL thread id 481964, OS thread handle 70370189385456, query id 1320790141 10.4.3.228 app_cdp_0 update
insert ignore into cdp_user_email
(email_address,
clean_tag,
is_upload_emarsys,
created_at,
upload_at
)
value


*** (1) HOLDS THE LOCK(S):
RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982374 lock_mode X
Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
0: len 8; hex 73757072656d756d; asc supremum;;


*** (1) WAITING FOR THIS LOCK TO BE GRANTED:
RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982374 lock_mode X insert intention waiting
Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
0: len 8; hex 73757072656d756d; asc supremum;;


*** (2) TRANSACTION:
TRANSACTION 218982373, ACTIVE 0 sec inserting
mysql tables in use 1, locked 1
LOCK WAIT 11 lock struct(s), heap size 1136, 6 row lock(s)
MySQL thread id 481963, OS thread handle 70370189655792, query id 1320790139 10.4.3.228 app_cdp_0 update
insert ignore into cdp_user_email
(email_address,
clean_tag,
is_upload_emarsys,
created_at,
upload_at
)
value


*** (2) HOLDS THE LOCK(S):
RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982373 lock_mode X
Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
0: len 8; hex 73757072656d756d; asc supremum;;


*** (2) WAITING FOR THIS LOCK TO BE GRANTED:
RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982373 lock_mode X insert intention waiting
Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
0: len 8; hex 73757072656d756d; asc supremum;;

*** WE ROLL BACK TRANSACTION (1)
+ +

The fix

Change the batched insert into single-row inserts, writing one row per statement.
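A sketch of the change, reusing the table and columns from the deadlock log (values are placeholders): with one row per statement, two sessions no longer both hold the gap lock on the supremum record while waiting for each other's insert intention lock.

-- before: one multi-row statement per batch
-- insert ignore into cdp_user_email (email_address, clean_tag, is_upload_emarsys, created_at, upload_at)
-- values (...), (...), (...);

-- after: single-row statements
insert ignore into cdp_user_email
  (email_address, clean_tag, is_upload_emarsys, created_at, upload_at)
values ('user1@example.com', 0, 0, now(), null);

insert ignore into cdp_user_email
  (email_address, clean_tag, is_upload_emarsys, created_at, upload_at)
values ('user2@example.com', 0, 0, now(), null);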

diff --git "a/2023/05/16/flink-\347\274\226\350\257\221/index.html" "b/2023/05/16/flink-\347\274\226\350\257\221/index.html"
new file mode 100644

flink build

Background

Get familiar with how to use Flink.

Build

Download the code:
git clone https://github.com/apache/flink.git
cd flink
## build
./mvnw -T1C package -DskipTests
+ + +

Startup
## switch into the build output directory
cd build-target
## start the local cluster
./bin/start-cluster.sh
## run an example
./bin/flink run ./examples/batch/WordCount.jar
+ + +

Output
./bin/flink run ./examples/batch/WordCount.jar

Executing WordCount example with default input data set.
Use --input to specify file input.
Printing result to stdout. Use --output to specify output path.
Job has been submitted with JobID 9451e7d1ac07642311caa0633429027f
Program execution finished
Job with JobID 9451e7d1ac07642311caa0633429027f has finished.
Job Runtime: 218 ms
Accumulator Results:
- a6ac7b0991b6510ab27930ef3590fe1f (java.util.ArrayList) [170 elements]


(a,5)
(action,1)
(after,1)
(against,1)
(all,2)
(and,12)
(arms,1)
(arrows,1)
(awry,1)
(ay,1)
(bare,1)
(be,4)
(bear,3)
(bodkin,1)
(bourn,1)
(but,1)
(by,2)
(calamity,1)
(cast,1)
(coil,1)
(come,1)
(conscience,1)
(consummation,1)
(contumely,1)
(country,1)
(cowards,1)
(currents,1)
(d,4)
(death,2)
(delay,1)
(despis,1)
(devoutly,1)
(die,2)
(does,1)
(dread,1)
(dream,1)
(dreams,1)
(end,2)
(enterprises,1)
(er,1)
(fair,1)
(fardels,1)
(flesh,1)
(fly,1)
(for,2)
(fortune,1)
(from,1)
(give,1)
(great,1)
(grunt,1)
(have,2)
(he,1)
(heartache,1)
(heir,1)
(himself,1)
(his,1)
(hue,1)
(ills,1)
(in,3)
(insolence,1)
(is,3)
(know,1)
(law,1)
(life,2)
(long,1)
(lose,1)
(love,1)
(make,2)
(makes,2)
(man,1)
(may,1)
(merit,1)
(might,1)
(mind,1)
(moment,1)
(more,1)
(mortal,1)
(must,1)
(my,1)
(name,1)
(native,1)
(natural,1)
(no,2)
(nobler,1)
(not,2)
(now,1)
(nymph,1)
(o,1)
(of,15)
(off,1)
(office,1)
(ophelia,1)
(opposing,1)
(oppressor,1)
(or,2)
(orisons,1)
(others,1)
(outrageous,1)
(pale,1)
(pangs,1)
(patient,1)
(pause,1)
(perchance,1)
(pith,1)
(proud,1)
(puzzles,1)
(question,1)
(quietus,1)
(rather,1)
(regard,1)
(remember,1)
(resolution,1)
(respect,1)
(returns,1)
(rub,1)
(s,5)
(say,1)
(scorns,1)
(sea,1)
(shocks,1)
(shuffled,1)
(sicklied,1)
(sins,1)
(sleep,5)
(slings,1)
(so,1)
(soft,1)
(something,1)
(spurns,1)
(suffer,1)
(sweat,1)
(take,1)
(takes,1)
(than,1)
(that,7)
(the,22)
(their,1)
(them,1)
(there,2)
(these,1)
(this,2)
(those,1)
(thought,1)
(thousand,1)
(thus,2)
(thy,1)
(time,1)
(tis,2)
(to,15)
(traveller,1)
(troubles,1)
(turn,1)
(under,1)
(undiscover,1)
(unworthy,1)
(us,3)
(we,4)
(weary,1)
(what,1)
(when,2)
(whether,1)
(whips,1)
(who,2)
(whose,1)
(will,1)
(wish,1)
(with,3)
(would,2)
(wrong,1)
(you,1)

Related reading

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/05/18/bigint-\346\230\240\345\260\204/index.html" "b/2023/05/18/bigint-\346\230\240\345\260\204/index.html" new file mode 100644 index 0000000000..9a9eba186f --- /dev/null +++ "b/2023/05/18/bigint-\346\230\240\345\260\204/index.html" @@ -0,0 +1,439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + mysql bigint 映射 | dinosaur + + + + + + + + + + + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/05/24/java-\346\225\260\347\273\204\345\243\260\346\230\216\344\275\215\347\275\256\345\214\272\345\210\253/index.html" "b/2023/05/24/java-\346\225\260\347\273\204\345\243\260\346\230\216\344\275\215\347\275\256\345\214\272\345\210\253/index.html" new file mode 100644 index 0000000000..eace448d34 --- /dev/null +++ "b/2023/05/24/java-\346\225\260\347\273\204\345\243\260\346\230\216\344\275\215\347\275\256\345\214\272\345\210\253/index.html" @@ -0,0 +1,451 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java 数组声明位置区别 | dinosaur + + + + + + + + + + + + +

+ java 数组声明位置区别 +


Background

String inputValues[] = {"cat", "dog", "dogs"};

When I declare an array with the brackets on the right-hand side, IDEA shows a small warning:

C-style array declaration of local variable 'inputValues' 

The IDE suggests changing it to:

String[] inputValues = {"cat", "dog", "dogs"};

So what is the difference between the two forms?

I checked Stack Overflow.

The two declarations are equivalent; only where the brackets are written differs. A quick sketch is shown below.
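
A minimal sketch (the class name is my own) confirming both declarations produce the same array type:

public class ArrayDeclDemo {
    public static void main(String[] args) {
        String inputValues[] = {"cat", "dog", "dogs"}; // C-style declaration, triggers the IDEA warning
        String[] preferred   = {"cat", "dog", "dogs"}; // Java-style declaration, recommended

        // Both variables have exactly the same runtime type:
        System.out.println(inputValues.getClass() == preferred.getClass()); // true
    }
}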


How does the JLS define this? (Its section on array variables allows the brackets either after the element type or after the variable name.)

References

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/05/25/fst-\347\273\223\346\236\204/index.html" "b/2023/05/25/fst-\347\273\223\346\236\204/index.html" new file mode 100644 index 0000000000..fcabbb4fba --- /dev/null +++ "b/2023/05/25/fst-\347\273\223\346\236\204/index.html" @@ -0,0 +1,485 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + fst 结构 | dinosaur + + + + + + + + + + + + +

+ fst 结构 +


Background

Understand Lucene's FST (finite state transducer) structure.

Core functions

freezeTail -> compileNode

private void freezeTail(int prefixLenPlus1) throws IOException {  // the argument is an offset: common prefix length + 1
final int downTo = Math.max(1, prefixLenPlus1);
for (int idx = lastInput.length(); idx >= downTo; idx--) {

boolean doPrune = false;
boolean doCompile = false;


if (doCompile) {
...
parent.replaceLast(
lastInput.intAt(idx - 1),
compileNode(node, 1 + lastInput.length() - idx),
nextFinalOutput,
isFinal);
...
}
}
}
}
private CompiledNode compileNode(UnCompiledNode<T> nodeIn, int tailLength) throws IOException {
final long node;
long bytesPosStart = bytes.getPosition();
if (dedupHash != null
&& (doShareNonSingletonNodes || nodeIn.numArcs <= 1)
&& tailLength <= shareMaxTailLength) {
if (nodeIn.numArcs == 0) {
node = fst.addNode(this, nodeIn);
lastFrozenNode = node;
} else {
node = dedupHash.add(this, nodeIn);
}
} else {
node = fst.addNode(this, nodeIn);
}
assert node != -2;

long bytesPosEnd = bytes.getPosition();
if (bytesPosEnd != bytesPosStart) {
// The FST added a new node:
assert bytesPosEnd > bytesPosStart;
lastFrozenNode = node;
}

nodeIn.clear();

final CompiledNode fn = new CompiledNode();
fn.node = node;
return fn;
}
// serializes new node by appending its bytes to the end
// of the current byte[]
long addNode(FSTCompiler<T> fstCompiler, FSTCompiler.UnCompiledNode<T> nodeIn)
throws IOException {
T NO_OUTPUT = outputs.getNoOutput();

// System.out.println("FST.addNode pos=" + bytes.getPosition() + " numArcs=" + nodeIn.numArcs);
if (nodeIn.numArcs == 0) {
if (nodeIn.isFinal) {
return FINAL_END_NODE;
} else {
return NON_FINAL_END_NODE;
}
}
final long startAddress = fstCompiler.bytes.getPosition();
// System.out.println(" startAddr=" + startAddress);

final boolean doFixedLengthArcs = shouldExpandNodeWithFixedLengthArcs(fstCompiler, nodeIn);
if (doFixedLengthArcs) {
// System.out.println(" fixed length arcs");
if (fstCompiler.numBytesPerArc.length < nodeIn.numArcs) {
fstCompiler.numBytesPerArc = new int[ArrayUtil.oversize(nodeIn.numArcs, Integer.BYTES)];
fstCompiler.numLabelBytesPerArc = new int[fstCompiler.numBytesPerArc.length];
}
}

fstCompiler.arcCount += nodeIn.numArcs;

final int lastArc = nodeIn.numArcs - 1;

long lastArcStart = fstCompiler.bytes.getPosition();
int maxBytesPerArc = 0;
int maxBytesPerArcWithoutLabel = 0;
for (int arcIdx = 0; arcIdx < nodeIn.numArcs; arcIdx++) {
final FSTCompiler.Arc<T> arc = nodeIn.arcs[arcIdx];
final FSTCompiler.CompiledNode target = (FSTCompiler.CompiledNode) arc.target;
int flags = 0;
// System.out.println(" arc " + arcIdx + " label=" + arc.label + " -> target=" +
// target.node);

if (arcIdx == lastArc) {
flags += BIT_LAST_ARC;
}

if (fstCompiler.lastFrozenNode == target.node && !doFixedLengthArcs) {
// TODO: for better perf (but more RAM used) we
// could avoid this except when arc is "near" the
// last arc:
flags += BIT_TARGET_NEXT;
}

if (arc.isFinal) {
flags += BIT_FINAL_ARC;
if (arc.nextFinalOutput != NO_OUTPUT) {
flags += BIT_ARC_HAS_FINAL_OUTPUT;
}
} else {
assert arc.nextFinalOutput == NO_OUTPUT;
}

boolean targetHasArcs = target.node > 0;

if (!targetHasArcs) {
flags += BIT_STOP_NODE;
}

if (arc.output != NO_OUTPUT) {
flags += BIT_ARC_HAS_OUTPUT;
}

fstCompiler.bytes.writeByte((byte) flags);
long labelStart = fstCompiler.bytes.getPosition();
writeLabel(fstCompiler.bytes, arc.label);
int numLabelBytes = (int) (fstCompiler.bytes.getPosition() - labelStart);

// System.out.println(" write arc: label=" + (char) arc.label + " flags=" + flags + "
// target=" + target.node + " pos=" + bytes.getPosition() + " output=" +
// outputs.outputToString(arc.output));

if (arc.output != NO_OUTPUT) {
outputs.write(arc.output, fstCompiler.bytes);
// System.out.println(" write output");
}

if (arc.nextFinalOutput != NO_OUTPUT) {
// System.out.println(" write final output");
outputs.writeFinalOutput(arc.nextFinalOutput, fstCompiler.bytes);
}

if (targetHasArcs && (flags & BIT_TARGET_NEXT) == 0) {
assert target.node > 0;
// System.out.println(" write target");
fstCompiler.bytes.writeVLong(target.node);
}

// just write the arcs "like normal" on first pass, but record how many bytes each one took
// and max byte size:
if (doFixedLengthArcs) {
int numArcBytes = (int) (fstCompiler.bytes.getPosition() - lastArcStart);
fstCompiler.numBytesPerArc[arcIdx] = numArcBytes;
fstCompiler.numLabelBytesPerArc[arcIdx] = numLabelBytes;
lastArcStart = fstCompiler.bytes.getPosition();
maxBytesPerArc = Math.max(maxBytesPerArc, numArcBytes);
maxBytesPerArcWithoutLabel =
Math.max(maxBytesPerArcWithoutLabel, numArcBytes - numLabelBytes);
// System.out.println(" arcBytes=" + numArcBytes + " labelBytes=" + numLabelBytes);
}
}

// TODO: try to avoid wasteful cases: disable doFixedLengthArcs in that case
/*
*
* LUCENE-4682: what is a fair heuristic here?
* It could involve some of these:
* 1. how "busy" the node is: nodeIn.inputCount relative to frontier[0].inputCount?
* 2. how much binSearch saves over scan: nodeIn.numArcs
* 3. waste: numBytes vs numBytesExpanded
*
* the one below just looks at #3
if (doFixedLengthArcs) {
// rough heuristic: make this 1.25 "waste factor" a parameter to the phd ctor????
int numBytes = lastArcStart - startAddress;
int numBytesExpanded = maxBytesPerArc * nodeIn.numArcs;
if (numBytesExpanded > numBytes*1.25) {
doFixedLengthArcs = false;
}
}
*/

if (doFixedLengthArcs) {
assert maxBytesPerArc > 0;
// 2nd pass just "expands" all arcs to take up a fixed byte size

int labelRange = nodeIn.arcs[nodeIn.numArcs - 1].label - nodeIn.arcs[0].label + 1;
assert labelRange > 0;
if (shouldExpandNodeWithDirectAddressing(
fstCompiler, nodeIn, maxBytesPerArc, maxBytesPerArcWithoutLabel, labelRange)) {
writeNodeForDirectAddressing(
fstCompiler, nodeIn, startAddress, maxBytesPerArcWithoutLabel, labelRange);
fstCompiler.directAddressingNodeCount++;
} else {
writeNodeForBinarySearch(fstCompiler, nodeIn, startAddress, maxBytesPerArc);
fstCompiler.binarySearchNodeCount++;
}
}

final long thisNodeAddress = fstCompiler.bytes.getPosition() - 1;
fstCompiler.bytes.reverse(startAddress, thisNodeAddress);
fstCompiler.nodeCount++;
return thisNodeAddress;
}

Arc<T> describes a single arc (edge) in the FST.

//package org.apache.lucene.util.fst;
// org\apache\lucene\util\fst\FSTCompiler.java
/** Expert: holds a pending (seen but not yet serialized) arc. */
static class Arc<T> {
int label; // really an "unsigned" byte
Node target; // e.g. for an arc a -> b, target is b
boolean isFinal;
T output;
T nextFinalOutput;
}

Example

cat has weight 5
dog has weight 7
dogs has weight 13

String inputValues[] = {"cat", "dog", "dogs"};
long[] outputValues = {5, 7, 13};
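
For reference, a small sketch of building the FST for these inputs with Lucene's FSTCompiler (constructor and helper names may differ slightly across Lucene versions, so treat this as an approximation rather than the exact API):

PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
FSTCompiler<Long> fstCompiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs);
IntsRefBuilder scratch = new IntsRefBuilder();
for (int i = 0; i < inputValues.length; i++) {
    // inputs must be added in sorted order; "cat" < "dog" < "dogs" already holds here
    fstCompiler.add(Util.toIntsRef(new BytesRef(inputValues[i]), scratch), outputValues[i]);
}
FST<Long> fst = fstCompiler.compile();
Long weight = Util.get(fst, new BytesRef("dog")); // 7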

Below is the example for "cat".
"cat" consists of three characters c, a, t, with ASCII codes:

  • c: 99
  • a: 97
  • t: 116


Arc definition: an arc is written as [a:label1] -> [b:label2], meaning a points to b, where a's value is label1 and b's value is label2.

Below is a screenshot from IDEA:
[2747:c] -> [2856:a] -> [2860:t]


fst linklist

+

Below is an example of fst.bytes:

fst bytes

In the end it is serialized into the following byte array:

[0, 116, 15, 97, 6, 6, 115, 31, 103, 7, 111, 6, 7, 100, 22, 4, 5, 99, 16]

Related reading

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/05/27/java-\347\272\277\347\250\213\346\261\240/index.html" "b/2023/05/27/java-\347\272\277\347\250\213\346\261\240/index.html" new file mode 100644 index 0000000000..b7a180081e --- /dev/null +++ "b/2023/05/27/java-\347\272\277\347\250\213\346\261\240/index.html" @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java 线程池 | dinosaur + + + + + + + + + + + + +

+ java 线程池 +


Background

Understand the details of the thread pools in the Java standard library.

Thread pools

A thread pool is an object that manages a group of threads. Different thread pool implementations mainly differ in their strategies for creating, scheduling, and destroying threads.

Basic classes

Executors

Executor / Executors / ExecutorService:

Executor is an interface. It looks a lot like Runnable and in practice serves a similar purpose:

public interface Executor {
void execute(Runnable var1);
}
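
Beyond the interface, a quick sketch (my own example) of the most common concrete pool, ThreadPoolExecutor, and how tasks are submitted:

import java.util.concurrent.*;

public class PoolDemo {
    public static void main(String[] args) throws Exception {
        // core = max = 2 threads with an unbounded queue, equivalent to Executors.newFixedThreadPool(2)
        ExecutorService pool = new ThreadPoolExecutor(
                2, 2, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue<>());

        Future<Integer> result = pool.submit(() -> 1 + 1); // submit a Callable
        System.out.println(result.get());                  // 2
        pool.shutdown();                                   // stop accepting new tasks
    }
}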
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/06/05/WFST-\345\222\214lucene-\345\222\214fst/index.html" "b/2023/06/05/WFST-\345\222\214lucene-\345\222\214fst/index.html" new file mode 100644 index 0000000000..f4b91b711c --- /dev/null +++ "b/2023/06/05/WFST-\345\222\214lucene-\345\222\214fst/index.html" @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + WFST 和lucene 和fst | dinosaur + + + + + + + + + + + + +

+ WFST 和lucene 和fst +


In a WFST (Weighted Finite State Transducer), the "All Pairs Shortest Path" (APSP) algorithm computes the shortest path between any two states. Every transition between states carries a weight, and APSP finds, for each pair of states, the connecting path with the smallest total weight.
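
As a concrete illustration (my own sketch, not taken from any WFST library), APSP over a (min, +) semiring is just Floyd–Warshall run on the transition weights:

// dist[i][j] starts as the direct transition weight i -> j (Double.POSITIVE_INFINITY if there is no arc).
static double[][] allPairsShortestPath(double[][] w) {
    int n = w.length;
    double[][] d = new double[n][n];
    for (int i = 0; i < n; i++) d[i] = w[i].clone();
    for (int k = 0; k < n; k++)          // allow state k as an intermediate hop
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                d[i][j] = Math.min(d[i][j], d[i][k] + d[k][j]);
    return d;                            // d[i][j] = minimum total weight of any path i -> j
}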


Related reading

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/06/Payload-value-must-not-be-empty/index.html b/2023/06/06/Payload-value-must-not-be-empty/index.html new file mode 100644 index 0000000000..14ccc42161 --- /dev/null +++ b/2023/06/06/Payload-value-must-not-be-empty/index.html @@ -0,0 +1,464 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Payload value must not be empty | dinosaur + + + + + + + + + + + + +

+ Payload value must not be empty +


Background

In production, a RabbitMQ consumer hit the error: Payload value must not be empty

Caused by: org.springframework.messaging.handler.annotation.support.MethodArgumentNotValidException: Could not resolve method parameter at index 0 in public void com.xxxx.consume(java.lang.String,org.springframework.amqp.core.Message,com.rabbitmq.client.Channel) throws java.lang.Exception: 1 error(s): [Error in object 'content': codes []; arguments []; default message [Payload value must not be empty]] 
at org.springframework.messaging.handler.annotation.support.PayloadMethodArgumentResolver.resolveArgument(PayloadMethodArgumentResolver.java:122)
at org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolverComposite.resolveArgument(HandlerMethodArgumentResolverComposite.java:117)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.getMethodArgumentValues(InvocableHandlerMethod.java:147)
at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.invoke(InvocableHandlerMethod.java:115)
at org.springframework.amqp.rabbit.listener.adapter.HandlerAdapter.invoke(HandlerAdapter.java:75)
at org.springframework.amqp.rabbit.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:261)

Fix

Add the @Payload annotation:

@RabbitListener(queues = "queue")
public void consume( @Payload(required = false) String content, Message message, Channel channel) throws Exception { // the payload is bound to the content parameter
...
}

Reading the source

I dug into the code to find the cause. The relevant file:

  • org\springframework\messaging\handler\annotation\support\PayloadMethodArgumentResolver.java
    public Object resolveArgument(MethodParameter parameter, Message<?> message) throws Exception {
    Payload ann = parameter.getParameterAnnotation(Payload.class);
    if (ann != null && StringUtils.hasText(ann.expression())) {
    throw new IllegalStateException("@Payload SpEL expressions not supported by this resolver");
    }

    Object payload = message.getPayload();
    if (isEmptyPayload(payload)) { // condition 1
    if (ann == null || ann.required()) { // condition 2
    String paramName = getParameterName(parameter);
    BindingResult bindingResult = new BeanPropertyBindingResult(payload, paramName);
    bindingResult.addError(new ObjectError(paramName, "Payload value must not be empty")); // the empty-payload check happens here
    throw new MethodArgumentNotValidException(message, parameter, bindingResult);
    }
    else {
    return null;
    }
    }
    }
  • condition 1: the payload is empty
  • condition 2: there is no @Payload annotation, or @Payload(required = true)


Cause: the exception is thrown when both condition 1 and condition 2 are satisfied.

Without the annotation both conditions hold by default, so the fix is to add the @Payload annotation:

@RabbitListener(queues = "queue")
public void consume( @Payload(required = false) String content, Message message, Channel channel) throws Exception { // the payload is bound to the content parameter
...
}

Stack trace

resolveArgument:111, PayloadMethodArgumentResolver (org.springframework.messaging.handler.annotation.support)
resolveArgument:117, HandlerMethodArgumentResolverComposite (org.springframework.messaging.handler.invocation)
getMethodArgumentValues:147, InvocableHandlerMethod (org.springframework.messaging.handler.invocation)
invoke:115, InvocableHandlerMethod (org.springframework.messaging.handler.invocation)
invoke:75, HandlerAdapter (org.springframework.amqp.rabbit.listener.adapter)
invokeHandler:261, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
invokeHandlerAndProcessResult:207, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
onMessage:146, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
doInvokeListener:1665, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
actualInvokeListener:1584, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
invokeListener:-1, 1393414871 (org.springframework.amqp.rabbit.listener.AbstractMessageListenerContainer$$Lambda$1549)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invokeJoinpointUsingReflection:344, AopUtils (org.springframework.aop.support)
invokeJoinpoint:198, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
doWithRetry:93, RetryOperationsInterceptor$1 (org.springframework.retry.interceptor)
doExecute:329, RetryTemplate (org.springframework.retry.support)
execute:225, RetryTemplate (org.springframework.retry.support)
invoke:116, RetryOperationsInterceptor (org.springframework.retry.interceptor)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
invoke:215, JdkDynamicAopProxy (org.springframework.aop.framework)
invokeListener:-1, $Proxy247 (org.springframework.amqp.rabbit.listener)
invokeListener:1572, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
doExecuteListener:1563, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
executeListener:1507, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
doReceiveAndExecute:967, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
receiveAndExecute:914, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
access$1600:83, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
mainLoop:1291, SimpleMessageListenerContainer$AsyncMessageProcessingConsumer (org.springframework.amqp.rabbit.listener)
run:1197, SimpleMessageListenerContainer$AsyncMessageProcessingConsumer (org.springframework.amqp.rabbit.listener)
run:834, Thread (java.lang)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/07/Invalid-JSON-text-in-argument-2-in-mysql8/index.html b/2023/06/07/Invalid-JSON-text-in-argument-2-in-mysql8/index.html new file mode 100644 index 0000000000..47b0262ffb --- /dev/null +++ b/2023/06/07/Invalid-JSON-text-in-argument-2-in-mysql8/index.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Invalid JSON text in argument 2 in mysql8 | dinosaur + + + + + + + + + + + + +

+ Invalid JSON text in argument 2 in mysql8 +


Background

There is an update_rules column of JSON type:

{"type": "once", "values": []}

I wanted to use JSON_CONTAINS to match the "once" value inside it, but the following SQL kept failing:

SELECT  *  FROM  `usergroup`  WHERE JSON_CONTAINS(update_rules , 'once' , '$.type' )


The error was always "Invalid JSON text in argument 2", and after staring at it for a long time I could not see what was wrong with the second argument.

The example in the MySQL documentation looks just like what I wrote.

Fix

A Stack Overflow search revealed that the candidate argument must itself be valid JSON, so a string literal needs double quotes inside the single quotes.

That is, change 'once' to '"once"'.

The full SQL becomes:

SELECT  *  FROM  `usergroup`  WHERE JSON_CONTAINS(update_rules , '"once"' , '$.type' )

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/09/why-bison-can-be-find-in-cmake/index.html b/2023/06/09/why-bison-can-be-find-in-cmake/index.html new file mode 100644 index 0000000000..af4f729a29 --- /dev/null +++ b/2023/06/09/why-bison-can-be-find-in-cmake/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + why bison can be find in cmake | dinosaur + + + + + + + + + + + + +

+ why bison can be find in cmake +


Background

While writing CMake files I wondered why find_package(BISON) is found by default.

Reason

CMake ships with built-in FindXXX.cmake modules:

https://cmake.org/cmake/help/v2.8.9/cmake.html#module:FindBISON

/usr/share/cmake-3.22/Modules/FindBISON.cmake
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/14/clickhouse-jdbc-1002-error/index.html b/2023/06/14/clickhouse-jdbc-1002-error/index.html new file mode 100644 index 0000000000..501317f3d8 --- /dev/null +++ b/2023/06/14/clickhouse-jdbc-1002-error/index.html @@ -0,0 +1,448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clickhouse jdbc 1002 error | dinosaur + + + + + + + + + + + + +

+ clickhouse jdbc 1002 error +


Background

Production occasionally throws the following error: Unknown error 1002, server ClickHouseNode. ClickHouse is accessed over the HTTP protocol.

Caused by: java.sql.SQLException: Unknown error 1002, server ClickHouseNode(addr=http:xxxx.amazonaws.com:8123, db=personas)@1267316279
at com.clickhouse.jdbc.SqlExceptionUtils.handle(SqlExceptionUtils.java:54)
at com.clickhouse.jdbc.SqlExceptionUtils.handle(SqlExceptionUtils.java:69)
at com.clickhouse.jdbc.internal.ClickHouseStatementImpl.executeStatement(ClickHouseStatementImpl.java:139)
at com.clickhouse.jdbc.internal.SqlBasedPreparedStatement.executeBatch(SqlBasedPreparedStatement.java:158)
at com.clickhouse.jdbc.internal.SqlBasedPreparedStatement.execute(SqlBasedPreparedStatement.java:382)
at com.zaxxer.hikari.pool.ProxyPreparedStatement.execute(ProxyPreparedStatement.java:44)
at com.zaxxer.hikari.pool.HikariProxyPreparedStatement.execute(HikariProxyPreparedStatement.java)
at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
at org.apache.ibatis.executor.SimpleExecutor.doUpdate(SimpleExecutor.java:50)
at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
at org.apache.ibatis.executor.CachingExecutor.update(CachingExecutor.java:76)
at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:64)
at com.sun.proxy.$Proxy263.update(Unknown Source)
at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:194)
at org.apache.ibatis.session.defaults.DefaultSqlSession.insert(DefaultSqlSession.java:181)
at jdk.internal.reflect.GeneratedMethodAccessor201.invoke(Unknown Source)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:427)
... 18 common frames omitted

Reading the source

There are two places that return ERROR_UNKNOWN; the code is below:

// path: com\clickhouse\clickhouse-client\0.3.2\clickhouse-client-0.3.2-sources.jar!\com\clickhouse\client\ClickHouseException.java
public static final int ERROR_UNKNOWN = 1002;
private static int extractErrorCode(String errorMessage) {
if (errorMessage == null || errorMessage.isEmpty()) {
return ERROR_UNKNOWN; // return the unknown error code
} else if (errorMessage.startsWith("Poco::Exception. Code: 1000, ")) {
return ERROR_POCO;
}

int startIndex = errorMessage.indexOf(' ');
if (startIndex >= 0) {
for (int i = ++startIndex, len = errorMessage.length(); i < len; i++) {
char ch = errorMessage.charAt(i);
if (ch == '.' || ch == ',' || Character.isWhitespace(ch)) {
try {
return Integer.parseInt(errorMessage.substring(startIndex, i));
} catch (NumberFormatException e) {
// ignore
}
break;
}
}
}

// this is confusing as usually it's a client-side exception
return ERROR_UNKNOWN; // 抛出unknown
}



Judging from the code, this error wraps an external failure rather than a SQL syntax error, so the problem usually lies on the reverse-proxy side, i.e. in the proxy layer or in ClickHouse itself.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/14/nacos-client-and-serve/index.html b/2023/06/14/nacos-client-and-serve/index.html new file mode 100644 index 0000000000..a08a083184 --- /dev/null +++ b/2023/06/14/nacos-client-and-serve/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nacos client and serve | dinosaur + + + + + + + + + + + + +

+ nacos client and serve +


Background

Understand Nacos service registration, discovery, and its protocol.

java-nacos-client

The call stack roughly ends up here:

write:35, NettyWritableBuffer (com.alibaba.nacos.shaded.io.grpc.netty.shaded.io.grpc.netty)
writeRaw:290, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
writeKnownLengthUncompressed:229, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
writeUncompressed:168, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
writePayload:141, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
writeMessage:53, AbstractStream (com.alibaba.nacos.shaded.io.grpc.internal)
writeMessage:37, ForwardingClientStream (com.alibaba.nacos.shaded.io.grpc.internal)
sendMessageInternal:473, ClientCallImpl (com.alibaba.nacos.shaded.io.grpc.internal)
sendMessage:457, ClientCallImpl (com.alibaba.nacos.shaded.io.grpc.internal)
sendMessage:37, ForwardingClientCall (com.alibaba.nacos.shaded.io.grpc)
sendMessage:37, ForwardingClientCall (com.alibaba.nacos.shaded.io.grpc)
asyncUnaryRequestCall:284, ClientCalls (com.alibaba.nacos.shaded.io.grpc.stub)
futureUnaryCall:191, ClientCalls (com.alibaba.nacos.shaded.io.grpc.stub)
request:212, RequestGrpc$RequestFutureStub (com.alibaba.nacos.api.grpc.auto)
request:73, GrpcConnection (com.alibaba.nacos.common.remote.client.grpc)
request:657, RpcClient (com.alibaba.nacos.common.remote.client)
requestToServer:269, NamingGrpcClientProxy (com.alibaba.nacos.client.naming.remote.gprc)
queryInstancesOfService:169, NamingGrpcClientProxy (com.alibaba.nacos.client.naming.remote.gprc)
queryInstancesOfService:111, NamingClientProxyDelegate (com.alibaba.nacos.client.naming.remote)
run:182, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
call:515, Executors$RunnableAdapter (java.util.concurrent)
run$$$capture:264, FutureTask (java.util.concurrent)
run:-1, FutureTask (java.util.concurrent)
- Async stack trace
<init>:151, FutureTask (java.util.concurrent)
<init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
addTask:104, ServiceInfoUpdateService (com.alibaba.nacos.client.naming.core)
scheduleUpdateIfAbsent:98, ServiceInfoUpdateService (com.alibaba.nacos.client.naming.core)
subscribe:144, NamingClientProxyDelegate (com.alibaba.nacos.client.naming.remote)
subscribe:393, NacosNamingService (com.alibaba.nacos.client.naming)
start:134, NacosWatch (com.alibaba.cloud.nacos.discovery)
doStart:182, DefaultLifecycleProcessor (org.springframework.context.support)
access$200:53, DefaultLifecycleProcessor (org.springframework.context.support)
start:360, DefaultLifecycleProcessor$LifecycleGroup (org.springframework.context.support)
startBeans:158, DefaultLifecycleProcessor (org.springframework.context.support)
onRefresh:122, DefaultLifecycleProcessor (org.springframework.context.support)
finishRefresh:895, AbstractApplicationContext (org.springframework.context.support)
refresh:554, AbstractApplicationContext (org.springframework.context.support)
refresh:143, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
refresh:755, SpringApplication (org.springframework.boot)
refresh:747, SpringApplication (org.springframework.boot)
refreshContext:402, SpringApplication (org.springframework.boot)
run:312, SpringApplication (org.springframework.boot)
main:25, Application (com.patpat.mms)
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/06/19/lucene-\346\220\234\347\264\242\350\277\207\347\250\213/index.html" "b/2023/06/19/lucene-\346\220\234\347\264\242\350\277\207\347\250\213/index.html" new file mode 100644 index 0000000000..5258982eb9 --- /dev/null +++ "b/2023/06/19/lucene-\346\220\234\347\264\242\350\277\207\347\250\213/index.html" @@ -0,0 +1,485 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lucene 搜索过程 | dinosaur + + + + + + + + + + + + +

+ lucene 搜索过程 +


Background

Understand Lucene's search process:

  • tokenize the query
  • compute a score for each term, sort, and take the top-k results

Call stacks

  • Write path:

    add:473, FSTCompiler (org.apache.lucene.util.fst)
    compileIndex:504, Lucene90BlockTreeTermsWriter$PendingBlock (org.apache.lucene.codecs.lucene90.blocktree)
    writeBlocks:725, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
    finish:1105, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
    write:370, Lucene90BlockTreeTermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
    write:172, PerFieldPostingsFormat$FieldsWriter (org.apache.lucene.codecs.perfield)
    flush:135, FreqProxTermsWriter (org.apache.lucene.index)
    flush:310, IndexingChain (org.apache.lucene.index)
    flush:392, DocumentsWriterPerThread (org.apache.lucene.index)
    doFlush:492, DocumentsWriter (org.apache.lucene.index)
    flushAllThreads:671, DocumentsWriter (org.apache.lucene.index)
    doFlush:4194, IndexWriter (org.apache.lucene.index)
    flush:4168, IndexWriter (org.apache.lucene.index)
    shutdown:1322, IndexWriter (org.apache.lucene.index)
    close:1362, IndexWriter (org.apache.lucene.index)
    doTestSearch:133, FstTest (com.dinosaur.lucene.demo)
  • Read path:

    findTargetArc:1418, FST (org.apache.lucene.util.fst)
    seekExact:511, SegmentTermsEnum (org.apache.lucene.codecs.lucene90.blocktree)
    loadTermsEnum:111, TermStates (org.apache.lucene.index)
    build:96, TermStates (org.apache.lucene.index)
    createWeight:227, TermQuery (org.apache.lucene.search)
    createWeight:904, IndexSearcher (org.apache.lucene.search)
    search:687, IndexSearcher (org.apache.lucene.search)
    searchAfter:523, IndexSearcher (org.apache.lucene.search)
    search:538, IndexSearcher (org.apache.lucene.search)
    doPagingSearch:158, SearchFiles (com.dinosaur.lucene.demo)
    testSearch:128, SearchFiles (com.dinosaur.lucene.demo)
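
For orientation, a minimal sketch of the read path these frames come from (index path and field names follow the dump later in this post; the class name is my own):

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.store.FSDirectory;
import java.nio.file.Paths;

public class SimpleSearch {
    public static void main(String[] args) throws Exception {
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("app/index")))) {
            IndexSearcher searcher = new IndexSearcher(reader);
            Query query = new TermQuery(new Term("contents", "you")); // single-term query
            TopDocs top = searcher.search(query, 10);                 // top-10 hits by BM25 score
            for (ScoreDoc sd : top.scoreDocs) {
                System.out.println(sd.doc + " " + sd.score + " " + searcher.doc(sd.doc).get("path"));
            }
        }
    }
}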


Example

cfe file

$ hexdump  app/index/_3.cfs
000000 3f d7 6c 17 14 4c 75 63 65 6e 65 39 30 43 6f 6d
000010 70 6f 75 6e 64 44 61 74 61 00 00 00 00 7a fc 30
000020 52 e0 51 d2 54 be 49 7f 21 78 69 fe c4 00 00 00
000030 3f d7 6c 17 11 4c 75 63 65 6e 65 39 30 4e 6f 72
000040 6d 73 44 61 74 61 00 00 00 00 7a fc 30 52 e0 51
000050 d2 54 be 49 7f 21 78 69 fe c4 00 04 03 c0 28 93
000060 e8 00 00 00 00 00 00 00 00 f0 6a f4 62 00 00 00
000070 3f d7 6c 17 16 4c 75 63 65 6e 65 39 30 46 69 65
000080 6c 64 73 49 6e 64 65 78 49 64 78 00 00 00 00 7a
000090 fc 30 52 e0 51 d2 54 be 49 7f 21 78 69 fe c4 00
0000a0 c0 28 93 e8 00 00 00 00 00 00 00 00 92 7f 21 bb
0000b0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 69
0000c0 6e 74 73 46 6f 72 6d 61 74 49 6e 64 65 78 00 00
0000d0 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78 69
0000e0 fe c4 00 32 c0 28 93 e8 00 00 00 00 00 00 00 00
0000f0 f7 61 6e 2f 00 00 00 00 3f d7 6c 17 13 42 6c 6f
000100 63 6b 54 72 65 65 54 65 72 6d 73 49 6e 64 65 78
000110 00 00 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21
000120 78 69 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 00
000130 00 c0 28 93 e8 00 00 00 00 00 00 00 00 07 1a 7b
000140 47 00 00 00 00 00 00 00 3f d7 6c 17 18 4c 75 63
000150 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61
000160 74 44 61 74 61 00 00 00 00 7a fc 30 52 e0 51 d2
000170 54 be 49 7f 21 78 69 fe c4 00 02 fe 00 08 80 00
000180 01 88 d2 0f 28 0d ff c0 28 93 e8 00 00 00 00 00
000190 00 00 00 6d 43 fa 6e 00 3f d7 6c 17 19 4c 75 63
0001a0 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69
0001b0 74 65 72 44 6f 63 00 00 00 00 7a fc 30 52 e0 51
0001c0 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65 6e
0001d0 65 39 30 5f 30 01 03 01 03 c0 28 93 e8 00 00 00 <--- the 01 03 on the right are the two docIds for "you"
0001e0 00 00 00 00 00 26 f5 75 88 00 00 00 00 00 00 00
0001f0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 73
000200 74 69 6e 67 73 57 72 69 74 65 72 50 6f 73 00 00
000210 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78 69
000220 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 02 00 00
000230 01 02 03 01 c0 28 93 e8 00 00 00 00 00 00 00 00
000240 c5 ac 32 b6 00 00 00 00 3f d7 6c 17 15 4c 75 63
000250 65 6e 65 39 30 4e 6f 72 6d 73 4d 65 74 61 64 61
000260 74 61 00 00 00 00 7a fc 30 52 e0 51 d2 54 be 49
000270 7f 21 78 69 fe c4 00 02 00 00 00 ff ff ff ff ff
000280 ff ff ff 00 00 00 00 00 00 00 00 ff ff ff 02 00
000290 00 00 01 2b 00 00 00 00 00 00 00 ff ff ff ff c0
0002a0 28 93 e8 00 00 00 00 00 00 00 00 1c 85 f4 99 00
0002b0 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f
0002c0 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74
0002d0 61 00 00 00 01 7a fc 30 52 e0 51 d2 54 be 49 7f
0002e0 21 78 69 fe c4 00 00 0a 00 01 08 12 13 01 04 02
0002f0 05 05 05 05 05 05 05 05 05 10 00 40 10 2e 2e 5c
000300 40 64 6f 63 73 40 5c 64 65 6d 40 6f 2e 74 78 40
000310 74 00 11 2e 40 2e 5c 64 6f 40 63 73 5c 64 40 65
000320 6d 6f 32 40 2e 74 78 74 c0 28 93 e8 00 00 00 00
000330 00 00 00 00 81 b0 7e 09 3f d7 6c 17 18 4c 75 63
000340 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61
000350 74 4d 65 74 61 00 00 00 00 7a fc 30 52 e0 51 d2
000360 54 be 49 7f 21 78 69 fe c4 00 01 00 00 00 3f d7
000370 6c 17 03 42 4b 44 00 00 00 09 01 01 80 04 08 01
000380 80 00 01 88 d2 0f 28 0d 80 00 01 88 d2 0f 28 0d
000390 02 02 01 32 00 00 00 00 00 00 00 33 00 00 00 00
0003a0 00 00 00 ff ff ff ff 44 00 00 00 00 00 00 00 4f
0003b0 00 00 00 00 00 00 00 c0 28 93 e8 00 00 00 00 00
0003c0 00 00 00 02 3e 97 d6 00 3f d7 6c 17 17 4c 75 63
0003d0 65 6e 65 39 30 46 69 65 6c 64 73 49 6e 64 65 78
0003e0 4d 65 74 61 00 00 00 01 7a fc 30 52 e0 51 d2 54
0003f0 be 49 7f 21 78 69 fe c4 00 80 80 05 02 00 00 00
000400 0a 00 00 00 02 00 00 00 30 00 00 00 00 00 00 00
000410 00 00 00 00 00 00 00 00 00 00 00 40 00 00 00 00
000420 00 00 00 00 00 30 00 00 00 00 00 00 00 36 00 00
000430 00 00 00 00 00 00 00 84 42 00 00 00 00 00 00 00
000440 00 00 30 00 00 00 00 00 00 00 78 00 00 00 00 00
000450 00 00 01 01 02 c0 28 93 e8 00 00 00 00 00 00 00
000460 00 c3 23 d0 d6 00 00 00 3f d7 6c 17 12 42 6c 6f <------- 3f
000470 63 6b 54 72 65 65 54 65 72 6d 73 44 69 63 74 00
000480 00 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78
000490 69 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 0b 9c <--------
0004a0 01 61 72 65 68 6f 77 6f 6c 64 73 74 75 64 65 6e
0004b0 74 79 6f 75 0a 03 03 03 07 03 05 04 00 05 04 00 <------ 05 04 00 05 04 are position data
0004c0 0b 7a 3d 04 00 02 01 01 05 01 00 01 05 8c 02 2e <------- 7a 3d 04 packs a lot of position info
0004d0 2e 5c 64 6f 63 73 5c 64 65 6d 6f 2e 74 78 74 2e
0004e0 2e 5c 64 6f 63 73 5c 64 65 6d 6f 32 2e 74 78 74
0004f0 04 10 11 01 03 04 82 01 00 05 c0 28 93 e8 00 00
000500 00 00 00 00 00 00 1a 7f dc 45 00 00 00 00 00 00
000510 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65
000520 72 6d 73 4d 65 74 61 00 00 00 00 7a fc 30 52 e0
000530 51 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65
000540 6e 65 39 30 5f 30 3f d7 6c 17 1b 4c 75 63 65 6e
000550 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 74 65
000560 72 54 65 72 6d 73 00 00 00 00 7a fc 30 52 e0 51
000570 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65 6e
000580 65 39 30 5f 30 80 01 02 02 05 02 da 01 07 07 02
000590 03 61 72 65 03 79 6f 75 37 3f d7 6c 17 03 46 53
0005a0 54 00 00 00 08 01 03 01 da 02 00 00 01 00 02 02
0005b0 92 03 02 02 10 2e 2e 5c 64 6f 63 73 5c 64 65 6d
0005c0 6f 2e 74 78 74 11 2e 2e 5c 64 6f 63 73 5c 64 65
0005d0 6d 6f 32 2e 74 78 74 38 3f d7 6c 17 03 46 53 54
0005e0 00 00 00 08 01 03 03 92 02 00 00 01 49 00 00 00
0005f0 00 00 00 00 a2 00 00 00 00 00 00 00 c0 28 93 e8
000600 00 00 00 00 00 00 00 00 c9 44 df a8 00 00 00 00
000610 3f d7 6c 17 12 4c 75 63 65 6e 65 39 34 46 69 65
000620 6c 64 49 6e 66 6f 73 00 00 00 00 7a fc 30 52 e0
000630 51 d2 54 be 49 7f 21 78 69 fe c4 00 03 04 70 61
000640 74 68 00 02 01 00 ff ff ff ff ff ff ff ff 02 1d
000650 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e 67 73
000660 46 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75
000670 63 65 6e 65 39 30 1d 50 65 72 46 69 65 6c 64 50
000680 6f 73 74 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75
000690 66 66 69 78 01 30 00 00 01 00 08 6d 6f 64 69 66
0006a0 69 65 64 01 00 00 00 ff ff ff ff ff ff ff ff 00
0006b0 01 01 08 00 01 00 08 63 6f 6e 74 65 6e 74 73 02
0006c0 00 03 00 ff ff ff ff ff ff ff ff 02 1d 50 65 72
0006d0 46 69 65 6c 64 50 6f 73 74 69 6e 67 73 46 6f 72
0006e0 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75 63 65 6e
0006f0 65 39 30 1d 50 65 72 46 69 65 6c 64 50 6f 73 74
000700 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75 66 66 69
000710 78 01 30 00 00 01 00 c0 28 93 e8 00 00 00 00 00
000720 00 00 00 36 55 24 d2 c0 28 93 e8 00 00 00 00 00
000730 00 00 00 41 6a 49 d4

The tim file's offset is offset=1128.
tim file

score:250, BM25Similarity$BM25Scorer (org.apache.lucene.search.similarities)
score:60, LeafSimScorer (org.apache.lucene.search)
score:75, TermScorer (org.apache.lucene.search)
collect:73, TopScoreDocCollector$SimpleTopScoreDocCollector$1 (org.apache.lucene.search)
scoreAll:305, Weight$DefaultBulkScorer (org.apache.lucene.search)
score:247, Weight$DefaultBulkScorer (org.apache.lucene.search)
score:38, BulkScorer (org.apache.lucene.search)
search:776, IndexSearcher (org.apache.lucene.search)
search:694, IndexSearcher (org.apache.lucene.search)
search:688, IndexSearcher (org.apache.lucene.search)
searchAfter:523, IndexSearcher (org.apache.lucene.search)
search:538, IndexSearcher (org.apache.lucene.search)
doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)

readField:248, Lucene90CompressingStoredFieldsReader (org.apache.lucene.codecs.lucene90.compressing)
document:642, Lucene90CompressingStoredFieldsReader (org.apache.lucene.codecs.lucene90.compressing)
document:253, SegmentReader (org.apache.lucene.index)
document:171, BaseCompositeReader (org.apache.lucene.index)
document:411, IndexReader (org.apache.lucene.index)
doc:390, IndexSearcher (org.apache.lucene.search)
doPagingSearch:195, SearchFiles (com.dinosaur.lucene.skiptest)


Relationship between tim / tip / doc

tip holds the pointer to a term
tim holds the term's statistics
doc holds the docIds for each term

In other words:
tip -> tim -> doc

  • use tip to check whether the term exists
  • then follow tip into tim to read the term statistics
  • then read doc to get the array of docIds containing the term

doc file

  • doc file open:
    <init>:74, Lucene90PostingsReader (org.apache.lucene.codecs.lucene90)
    fieldsProducer:424, Lucene90PostingsFormat (org.apache.lucene.codecs.lucene90)
    <init>:330, PerFieldPostingsFormat$FieldsReader (org.apache.lucene.codecs.perfield)
    fieldsProducer:392, PerFieldPostingsFormat (org.apache.lucene.codecs.perfield)
    <init>:118, SegmentCoreReaders (org.apache.lucene.index)
    <init>:92, SegmentReader (org.apache.lucene.index)
    doBody:94, StandardDirectoryReader$1 (org.apache.lucene.index)
    doBody:77, StandardDirectoryReader$1 (org.apache.lucene.index)
    run:816, SegmentInfos$FindSegmentsFile (org.apache.lucene.index)
    open:109, StandardDirectoryReader (org.apache.lucene.index)
    open:67, StandardDirectoryReader (org.apache.lucene.index)
    open:60, DirectoryReader (org.apache.lucene.index)
    doSearchDemo:25, SimpleSearchTest (com.dinosaur.lucene.demo)

how to find the docId list

+

org/apache/lucene/codecs/lucene90/Lucene90PostingsReader.java

  final class BlockDocsEnum extends PostingsEnum {

...

public PostingsEnum reset(IntBlockTermState termState, int flags) throws IOException {
docFreq = termState.docFreq;
totalTermFreq = indexHasFreq ? termState.totalTermFreq : docFreq;
docTermStartFP = termState.docStartFP;
skipOffset = termState.skipOffset;
singletonDocID = termState.singletonDocID;
if (docFreq > 1) {
if (docIn == null) {
// lazy init
docIn = startDocIn.clone();
}
docIn.seek(docTermStartFP);
}

doc = -1;
this.needsFreq = PostingsEnum.featureRequested(flags, PostingsEnum.FREQS);
this.isFreqsRead = true;
if (indexHasFreq == false || needsFreq == false) {
for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) {
freqBuffer[i] = 1;
}
}
accum = 0;
blockUpto = 0;
nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block
docBufferUpto = BLOCK_SIZE;
skipped = false;
return this;
}
}

Related reading

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/06/27/java-sort-default-order/index.html b/2023/06/27/java-sort-default-order/index.html new file mode 100644 index 0000000000..270d87f939 --- /dev/null +++ b/2023/06/27/java-sort-default-order/index.html @@ -0,0 +1,455 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java sort default order | dinosaur + + + + + + + + + + + + +

+ java sort default order +


Background

Java's Arrays.sort() and .stream().sorted() both rely on Comparable<T> / Comparator<T>.
I wanted to know whether these sort functions order ascending or descending by default.

One-sentence answer

All of them sort in ascending order.

Why

Arrays.sort() and .stream().sorted() work with Comparable<T> / Comparator<T>, which requires implementing int compare(T o1, T o2);

Let's look at the Javadoc of compare:

Params:
o1 – the first object to be compared. o2 – the second object to be compared.
Returns:
a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second.
int compare(T o1, T o2);

There are two arguments: the first is o1, the second is o2.
Return value:

  • if o1 < o2, return a negative number
  • if o1 > o2, return a positive number
  • if o1 = o2, return 0

Now look at the Javadoc in java.util.Comparator<T>: the default is the natural ordering.

/**
* Returns a comparator that imposes the reverse of the <em>natural
* ordering</em>.
*
* <p>The returned comparator is serializable and throws {@link
* NullPointerException} when comparing {@code null}.
*
* @param <T> the {@link Comparable} type of element to be compared
* @return a comparator that imposes the reverse of the <i>natural
* ordering</i> on {@code Comparable} objects.
* @see Comparable
* @since 1.8
*/
public static <T extends Comparable<? super T>> Comparator<T> reverseOrder() {
return Collections.reverseOrder();
}


+
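
A quick check (my own snippet) that the default order is ascending and how to reverse it:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class SortOrderDemo {
    public static void main(String[] args) {
        int[] nums = {3, 1, 2};
        Arrays.sort(nums);                         // natural (ascending) order
        System.out.println(Arrays.toString(nums)); // [1, 2, 3]

        List<String> words = List.of("dog", "cat", "dogs");
        System.out.println(words.stream().sorted().collect(Collectors.toList()));                          // [cat, dog, dogs]
        System.out.println(words.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList())); // [dogs, dog, cat]
    }
}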

Related reading

Natural ordering explanation
stackoverflow

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/06/27/milvus-\347\274\226\350\257\221\344\275\277\347\224\250/index.html" "b/2023/06/27/milvus-\347\274\226\350\257\221\344\275\277\347\224\250/index.html" new file mode 100644 index 0000000000..ddb7f8db86 --- /dev/null +++ "b/2023/06/27/milvus-\347\274\226\350\257\221\344\275\277\347\224\250/index.html" @@ -0,0 +1,451 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + milvus 编译使用 | dinosaur + + + + + + + + + + + + +

+ milvus 编译使用 +


Background

Build Milvus from source:

# Clone github repository.
$ git clone https://github.com/milvus-io/milvus.git

# Install third-party dependencies.
$ cd milvus/
$ ./scripts/install_deps.sh

# Compile Milvus.
$ make

Common errors

  • Could NOT find BLAS (missing: BLAS_LIBRARIES)
    Fix:

sudo apt-get update
sudo apt-get install -y libopenblas-dev

  • ./milvus: error while loading shared libraries: libtbbmalloc.so.2: cannot open shared object file: No such file or directory
    Fix:
    sudo apt-get install libtbb2

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/03/Unable-to-make-protected-final-java-lang-Class-java-lang-ClassLoader-defineClass/index.html b/2023/07/03/Unable-to-make-protected-final-java-lang-Class-java-lang-ClassLoader-defineClass/index.html new file mode 100644 index 0000000000..f1cb65b3ab --- /dev/null +++ b/2023/07/03/Unable-to-make-protected-final-java-lang-Class-java-lang-ClassLoader-defineClass/index.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Unable to make protected final java.lang.Class java.lang.ClassLoader.defineClass | dinosaur + + + + + + + + + + + + +

+ Unable to make protected final java.lang.Class java.lang.ClassLoader.defineClass +


Background

Local compilation fails with:

Caused by: java.lang.reflect.InaccessibleObjectException: Unable to make protected final java.lang.Class java.lang.ClassLoader.defineClass(java.lang.String,byte[],int,int,java.security.ProtectionDomain) throws java.lang.ClassFormatError accessible: module java.base does not "opens java.lang" to unnamed module @49dc7102

Troubleshooting

Only my machine showed this error; my colleagues did not hit it.
Since it is class-loader related, the JDK was the first suspect, and the cause indeed turned out to be a wrong JDK version: my colleagues use JDK 11.
I had just upgraded IDEA, and IDEA defaulted to JDK 17.

Cause

The project uses JDK 11, while my IDEA was using JDK 17.

Fix

Go to:

File --> Project Structure

Select JDK 11 and the problem is solved.
(screenshot)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/04/Numeric-overflow-in-expression-idea-java/index.html b/2023/07/04/Numeric-overflow-in-expression-idea-java/index.html new file mode 100644 index 0000000000..83fc0569e7 --- /dev/null +++ b/2023/07/04/Numeric-overflow-in-expression-idea-java/index.html @@ -0,0 +1,449 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Numeric overflow in expression idea java | dinosaur + + + + + + + + + + + + +

+ Numeric overflow in expression idea java +


Background

While working in IDEA I had the following Java code:

Date date = new Date();
date.getTime() + 30 * 24 * 60 * 60 * 1000;

Here 30 * 24 * 60 * 60 * 1000 overflows into a negative number, and IDEA warns:

Numeric overflow in expression

Changing it as follows removes the warning:

date.getTime() + 30L * 24 * 60 * 60 * 1000
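
A tiny check (my own snippet) of what the two expressions evaluate to:

public class OverflowDemo {
    public static void main(String[] args) {
        System.out.println(30 * 24 * 60 * 60 * 1000);  // -1702967296: evaluated in int arithmetic, overflows
        System.out.println(30L * 24 * 60 * 60 * 1000); // 2592000000: evaluated in long arithmetic
    }
}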

Why

The multiplication is evaluated in 32-bit int arithmetic first and only widened to long when it is added to getTime(), so the int product overflows before the promotion happens. Writing 30L makes the whole product a long expression, which does not overflow.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/04/found-duplicate-key-xxx-spring-boot/index.html b/2023/07/04/found-duplicate-key-xxx-spring-boot/index.html new file mode 100644 index 0000000000..9461ff7c4b --- /dev/null +++ b/2023/07/04/found-duplicate-key-xxx-spring-boot/index.html @@ -0,0 +1,444 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + found duplicate key xxx spring boot | dinosaur + + + + + + + + + + + + +

+ found duplicate key xxx spring boot +


Background

The project is a Spring Boot application.
Startup fails with the error: found duplicate key xxx

Caused by: while constructing a mapping
in 'reader', line 1, column 1:
management.server.port: 59326
^
found duplicate key cms
in 'reader', line 286, column 1:
cms:
^

at org.yaml.snakeyaml.constructor.SafeConstructor.processDuplicateKeys(SafeConstructor.java:106)
at org.yaml.snakeyaml.constructor.SafeConstructor.flattenMapping(SafeConstructor.java:76)
at org.yaml.snakeyaml.constructor.SafeConstructor.constructMapping2ndStep(SafeConstructor.java:189)
at org.yaml.snakeyaml.constructor.BaseConstructor.constructMapping(BaseConstructor.java:460)
at org.yaml.snakeyaml.constructor.SafeConstructor$ConstructYamlMap.construct(SafeConstructor.java:556)
at org.yaml.snakeyaml.constructor.BaseConstructor.constructObjectNoCheck(BaseConstructor.java:230)
at org.yaml.snakeyaml.constructor.BaseConstructor.constructObject(BaseConstructor.java:219)
at org.springframework.boot.env.OriginTrackedYamlLoader$OriginTrackingConstructor.constructObject(OriginTrackedYamlLoader.java:105)
at org.yaml.snakeyaml.constructor.BaseConstructor.constructDocument(BaseConstructor.java:173)
at org.yaml.snakeyaml.constructor.BaseConstructor.getData(BaseConstructor.java:138)
at org.yaml.snakeyaml.Yaml$1.next(Yaml.java:494)
at org.springframework.beans.factory.config.YamlProcessor.process(YamlProcessor.java:200)
at org.springframework.beans.factory.config.YamlProcessor.process(YamlProcessor.java:164)
at org.springframework.boot.env.OriginTrackedYamlLoader.load(OriginTrackedYamlLoader.java:82)
at org.springframework.boot.env.YamlPropertySourceLoader.load(YamlPropertySourceLoader.java:50)
at org.springframework.boot.context.config.ConfigFileApplicationListener$Loader.loadDocuments(ConfigFileApplicationListener.java:632)
at org.springframework.boot.context.config.ConfigFileApplicationListener$Loader.load(ConfigFileApplicationListener.java:534)
... 64 more



Fix

The YAML file contains more than one key named cms, which conflicts; removing the duplicate solves the problem.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/07/04/lucene-\345\210\206\350\257\215/index.html" "b/2023/07/04/lucene-\345\210\206\350\257\215/index.html" new file mode 100644 index 0000000000..b1a6405963 --- /dev/null +++ "b/2023/07/04/lucene-\345\210\206\350\257\215/index.html" @@ -0,0 +1,478 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lucene 分词 | dinosaur + + + + + + + + + + + + +

+ lucene 分词 +


Background

Understand the tokenization (analysis) process.

Overview

Lucene's query-parsing process:

(String query , String field ) -> Query

The whole process splits the string "how old" into individual TermQuery clauses.

Finally a query tree is built:

should:[how,old]

(figure)

Background

Tokenization in Lucene is a fundamental topic; it mainly relies on the incrementToken abstract method and on extending the AttributeSource class:

public abstract class TokenStream extends AttributeSource implements Closeable {
public abstract boolean incrementToken() throws IOException;
}
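
A minimal sketch (my own example; the field name is arbitrary) of driving incrementToken with StandardAnalyzer on the query text "how old":

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenizeDemo {
    public static void main(String[] args) throws Exception {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream ts = analyzer.tokenStream("contents", "how old")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class); // attribute lives on the AttributeSource
            ts.reset();
            while (ts.incrementToken()) {            // advance to the next token
                System.out.println(term.toString()); // prints: how, old
            }
            ts.end();
        }
    }
}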

lucene boolean clause

Related reading

Lucene's boolean clauses come in four kinds:

  • MUST
  • FILTER
  • SHOULD
  • MUST_NOT

Stack trace

<init>:202, TermQuery (org.apache.lucene.search)
newTermQuery:640, QueryBuilder (org.apache.lucene.util)
add:408, QueryBuilder (org.apache.lucene.util)
analyzeMultiBoolean:427, QueryBuilder (org.apache.lucene.util)
createFieldQuery:364, QueryBuilder (org.apache.lucene.util)
createFieldQuery:257, QueryBuilder (org.apache.lucene.util)
newFieldQuery:468, QueryParserBase (org.apache.lucene.queryparser.classic)
getFieldQuery:457, QueryParserBase (org.apache.lucene.queryparser.classic)
MultiTerm:680, QueryParser (org.apache.lucene.queryparser.classic)
Query:233, QueryParser (org.apache.lucene.queryparser.classic)
TopLevelQuery:223, QueryParser (org.apache.lucene.queryparser.classic)
parse:136, QueryParserBase (org.apache.lucene.queryparser.classic)
testParse:20, ParseTest (com.dinosaur.lucene.demo)

+ + +

Scoring

BlockMaxMaxscoreScorer's matches() evaluates every analyzed term and then adds up their scores.
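A runnable sketch of the end-to-end path from indexing to scoring, assuming a recent Lucene (8+) where BM25 is the default similarity; the field name and document text are made up. For a multi-term SHOULD query the hit's score is the sum of the matching terms' scores, which is what the stack trace below is computing.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ScoreDemo {
    public static void main(String[] args) throws Exception {
        Directory dir = new ByteBuffersDirectory();
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("content", "how old are you", Field.Store.YES));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader); // BM25 similarity by default
            Query query = new QueryParser("content", new StandardAnalyzer()).parse("how old");
            for (ScoreDoc hit : searcher.search(query, 10).scoreDocs) {
                System.out.println(hit.doc + " score=" + hit.score); // sum of the per-term scores
            }
        }
    }
}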

+
score:250, BM25Similarity$BM25Scorer (org.apache.lucene.search.similarities)
score:60, LeafSimScorer (org.apache.lucene.search)
score:75, TermScorer (org.apache.lucene.search)
matches:240, BlockMaxMaxscoreScorer$2 (org.apache.lucene.search)
doNext:85, TwoPhaseIterator$TwoPhaseIteratorAsDocIdSetIterator (org.apache.lucene.search)
advance:78, TwoPhaseIterator$TwoPhaseIteratorAsDocIdSetIterator (org.apache.lucene.search)
score:232, BooleanWeight$2 (org.apache.lucene.search)
score:38, BulkScorer (org.apache.lucene.search)
search:776, IndexSearcher (org.apache.lucene.search)
search:694, IndexSearcher (org.apache.lucene.search)
search:688, IndexSearcher (org.apache.lucene.search)
searchAfter:523, IndexSearcher (org.apache.lucene.search)
search:538, IndexSearcher (org.apache.lucene.search)
doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)
testSearch:131, SearchFiles (com.dinosaur.lucene.skiptest)

+ + +

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/07/05/llvm-ir-\344\276\213\345\255\220/index.html" "b/2023/07/05/llvm-ir-\344\276\213\345\255\220/index.html" new file mode 100644 index 0000000000..2f25b3c1f1 --- /dev/null +++ "b/2023/07/05/llvm-ir-\344\276\213\345\255\220/index.html" @@ -0,0 +1,459 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + llvm ir 例子 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ llvm ir 例子 +

+ + +
+ + + + +
+ + +

Background

I am teaching myself compiler theory, so I looked into what the backend does.

+

llvm ir

LLVM IR is the intermediate format handed to the backend. If you only want to write the frontend yourself (the lexer and the parse tree) and skip code generation and optimization entirely, you can emit LLVM IR and let the LLVM toolchain take it from there.

+
define i32 @sum(i32 %a, i32 %b) {
entry:
%result = add i32 %a, %b
ret i32 %result
}
+ +

This code defines a function named sum that takes two i32 parameters, %a and %b, and returns their sum. A line-by-line explanation follows:

+

define i32 @sum(i32 %a, i32 %b): this is the function definition.
The define keyword introduces a function, i32 is the 32-bit integer return type, @sum is the function name, and (i32 %a, i32 %b) declares two 32-bit integer parameters, %a and %b.

+

entry:: this is the function's entry label. In this simple example there is only a single basic block.

+

%result = add i32 %a, %b: this line uses the add instruction to add %a and %b and stores the result in %result. add is one of the arithmetic instructions in LLVM IR.

+

ret i32 %result: this line uses the ret instruction to return the value of %result from the function.
Note that LLVM IR is a low-level intermediate representation, unlike high-level languages such as C++ or Python. It has an assembly-like structure but a higher level of abstraction, and its rich instruction set and type system can represent code written in many different programming languages.

+

Compiling

To turn LLVM IR into assembly, use the llc command from the LLVM toolchain. llc is LLVM's static compiler; it lowers LLVM IR into machine-specific assembly code.

+

The basic steps for generating assembly from LLVM IR are:

+

Write an LLVM IR file: create a text file containing your program written in LLVM IR and save it with a .ll extension, for example example.ll.

+

Generate assembly with llc: open a terminal or command prompt, navigate to the bin folder of your LLVM installation, and run the following command:

+
llc -O3 example.ll -o example.s

+ +

This compiles example.ll to assembly at optimization level 3 (-O3) and writes the result to example.s.

+

(image omitted)

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/10/hidden-and-shadow-in-java/index.html b/2023/07/10/hidden-and-shadow-in-java/index.html new file mode 100644 index 0000000000..0498d31ba6 --- /dev/null +++ b/2023/07/10/hidden-and-shadow-in-java/index.html @@ -0,0 +1,466 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + hidden and shadow in java | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ hidden and shadow in java +

+ + +
+ + + + +
+ + +

shadow

This term describes same-named variables in different scopes within the same class: the declaration in the inner scope shadows the same-named declaration in the outer scope.

+

Shadowing involves a couple of related terms:

+
  • scope
  • simple name

Principle: when a simple name is used, the declaration inside the scope shadows the one outside the scope.

+
+

If the class declares a field with a certain name, then the declaration of that field is said to hide any and all accessible declarations of fields with the same name in superclasses, and superinterfaces of the class.

+
+
+

15.11. Field Access Expressions

+
+
+

6.2 Names and Identifiers
A name is used to refer to an entity declared in a program.
There are two forms of names: simple names and qualified names.
A simple name is a single identifier.
A qualified name consists of a name, a “.” token, and an identifier

+
+
+

The scope of a declaration is the region of the program within which the entity
declared by the declaration can be referred to using a simple name, provided it is
not shadowed (§6.4.1)

+
+
+

6.4.1 Shadowing
Some declarations may be shadowed in part of their scope by another declaration of
the same name, in which case a simple name cannot be used to refer to the declared
entity.

+
+ +

hidden

Hiding describes whether a field in a superclass is still accessible when a subclass declares a field with the same name.
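A minimal sketch showing both ideas side by side; the class and field names are made up:

class Parent {
    static String name = "parent";
}

class Child extends Parent {
    // This field HIDES Parent.name: inside Child, the simple name "name" now refers to Child.name.
    static String name = "child";

    static void demo() {
        String name = "local";           // this local variable SHADOWS the field Child.name
        System.out.println(name);        // local   (a simple name resolves to the innermost scope)
        System.out.println(Child.name);  // child   (a qualified name still reaches the field)
        System.out.println(Parent.name); // parent  (the hidden field stays accessible when qualified)
    }
}

public class ShadowHiddenDemo {
    public static void main(String[] args) {
        Child.demo();
    }
}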

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/13/kmp-correct/index.html b/2023/07/13/kmp-correct/index.html new file mode 100644 index 0000000000..e120e50e0e --- /dev/null +++ b/2023/07/13/kmp-correct/index.html @@ -0,0 +1,450 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + kmp correct | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ kmp correct +

+ + +
+ + + + +
+ + +

Background

Many blog posts on the KMP algorithm never spell out the proof of its correctness.

+

The original paper is here:

+

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/16/System-arraycopy-in-java/index.html b/2023/07/16/System-arraycopy-in-java/index.html new file mode 100644 index 0000000000..01861065c3 --- /dev/null +++ b/2023/07/16/System-arraycopy-in-java/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + System.arraycopy in java | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ System.arraycopy in java +

+ + +
+ + + + +
+ + +

Background

System.arraycopy is a method on the System class that copies elements from one array into another.

+

Why cover this method at all?
Because Lucene uses it all the time to copy content around.
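A tiny usage example; the array contents are made up:

public class ArrayCopyDemo {
    public static void main(String[] args) {
        char[] src = {'l', 'u', 'c', 'e', 'n', 'e'};
        char[] dest = new char[10];
        // copy 4 elements, starting at src[2], into dest starting at dest[0]
        System.arraycopy(src, 2, dest, 0, 4);
        System.out.println(new String(dest, 0, 4)); // prints "cene"
    }
}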

+

Javadoc

public static void arraycopy(Object src,
int srcPos,
Object dest,
int destPos,
int length)

Copies an array from the specified source array, beginning at the specified position, to the specified position of the destination array. A subsequence of array components are copied from the source array referenced by src to the destination array referenced by dest. The number of components copied is equal to the length argument. The components at positions srcPos through srcPos+length-1 in the source array are copied into positions destPos through destPos+length-1, respectively, of the destination array.

If the src and dest arguments refer to the same array object, then the copying is performed as if the components at positions srcPos through srcPos+length-1 were first copied to a temporary array with length components and then the contents of the temporary array were copied into positions destPos through destPos+length-1 of the destination array.

If dest is null, then a NullPointerException is thrown.

If src is null, then a NullPointerException is thrown and the destination array is not modified.

Otherwise, if any of the following is true, an ArrayStoreException is thrown and the destination is not modified:

The src argument refers to an object that is not an array.
The dest argument refers to an object that is not an array.
The src argument and dest argument refer to arrays whose component types are different primitive types.
The src argument refers to an array with a primitive component type and the dest argument refers to an array with a reference component type.
The src argument refers to an array with a reference component type and the dest argument refers to an array with a primitive component type.

Otherwise, if any of the following is true, an IndexOutOfBoundsException is thrown and the destination is not modified:

The srcPos argument is negative.
The destPos argument is negative.
The length argument is negative.
srcPos+length is greater than src.length, the length of the source array.
destPos+length is greater than dest.length, the length of the destination array.

Otherwise, if any actual component of the source array from position srcPos through srcPos+length-1 cannot be converted to the component type of the destination array by assignment conversion, an ArrayStoreException is thrown. In this case, let k be the smallest nonnegative integer less than length such that src[srcPos+k] cannot be converted to the component type of the destination array; when the exception is thrown, source array components from positions srcPos through srcPos+k-1 will already have been copied to destination array positions destPos through destPos+k-1 and no other positions of the destination array will have been modified. (Because of the restrictions already itemized, this paragraph effectively applies only to the situation where both arrays have component types that are reference types.)

Parameters:
src - the source array.
srcPos - starting position in the source array.
dest - the destination array.
destPos - starting position in the destination data.
length - the number of array elements to be copied.
Throws:
IndexOutOfBoundsException - if copying would cause access of data outside array bounds.
ArrayStoreException - if an element in the src array could not be stored into the dest array because of a type mismatch.
NullPointerException - if either src or dest is null.
+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/07/30/java-generic/index.html b/2023/07/30/java-generic/index.html new file mode 100644 index 0000000000..fddc537669 --- /dev/null +++ b/2023/07/30/java-generic/index.html @@ -0,0 +1,547 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java generic | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java generic +

+ + +
+ + + + +
+ + +

Java generics

What are Java generics?
Genericity is a property of a class/interface/method/constructor (put simply, a modifier on its declaration), so whenever we talk about Java generics we have to cover four things: class/interface/method/constructor.

+

Java generics

Glossary:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Translation (my own) | Definition | Source | Example
parameterized type | 参数化类型 | A parameterized type is a class or interface type of the form C<T1,…,Tn>, where C is the name of a generic class or interface, and <T1,…,Tn> is a list of type arguments that denote a particular parameterization of the generic class or interface. | §4.5 | List<String>
TypeIdentifier | 类型标识符 | TypeIdentifier is used in the declaration of classes, interfaces, and type parameters (§8.1, §9.1, §4.4), and when referring to types (§6.5). | — | —
type variable | 类型变量 | A type variable is an unqualified identifier used as a type in class, interface, method, and constructor bodies. A type variable is introduced by the declaration of a type parameter of a generic class, interface, method, or constructor (§8.1.2, §9.1.2, §8.4.4, §8.8.4). | §4.4 | —
+

Type parameters and type variables

How the two relate:
A type parameter is made up of several parts, and one of those parts can be a type variable.

+

Type parameter

A type parameter can be built from a type variable.

+

Type variable

A type variable, together with some other identifiers, makes up a type parameter.

+

What is a type variable:

+
+

A type variable is an unqualified identifier used as a type in class, interface, method,
and constructor bodies.

+
+

In other words, a type variable is an identifier used as a type in class/interface/method/constructor bodies.

+

So the conclusion is:
a type variable is an identifier.
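A small sketch of the distinction, with made-up names: the whole declaration T extends Number & Comparable<T> is the type parameter, while the identifier T, which is then usable as a type inside the body, is the type variable.

class Range<T extends Number & Comparable<T>> {   // the type parameter section declares T
    private final T low;                           // T used as an ordinary type: the type variable
    private final T high;

    Range(T low, T high) {
        this.low = low;
        this.high = high;
    }

    boolean contains(T value) {
        return low.compareTo(value) <= 0 && value.compareTo(high) <= 0;
    }
}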

+
What a type variable is for
+

A type variable is introduced by the declaration of a type parameter of a generic
class, interface, method, or constructor

+
+
Type parameter scope

It splits into two cases:
type parameters declared on classes/interfaces are one case, and type parameters declared on constructors/methods are the other.

+

For classes:

+
+

The scope of a class’s type parameter (§8.1.2) is the type parameter section of
the class declaration, and the type parameter section of any superclass type or
superinterface type of the class declaration, and the class body. If the class is a
record class (§8.10), then the scope of the type parameter additionally includes the
header of the record declaration (§8.10.1).

+
+

Generic classes, interfaces, methods, and constructors

Genericity shows up in four forms (a sketch with one of each follows the list):

+
  • generic class
  • generic interface
  • generic method
  • generic constructor
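A minimal sketch with one of each; all class, method, and variable names are made up:

import java.util.List;
import java.util.function.Function;

class Box<T> {                                     // generic class: the declaration introduces type variable T
    private final T value;

    <U> Box(List<U> seeds, Function<U, T> f) {     // generic constructor: declares its own type variable U
        this.value = f.apply(seeds.get(0));
    }

    <R> R map(Function<T, R> f) {                  // generic method: declares type variable R
        return f.apply(value);
    }
}

interface Pair<A, B> {                             // generic interface: declares type variables A and B
    A first();
    B second();
}

public class GenericDemo {
    public static void main(String[] args) {
        Box<String> box = new Box<String>(List.of(42), (Integer i) -> "n=" + i); // U is Integer here
        System.out.println(box.<Integer>map(String::length));                    // explicit type argument for R
    }
}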

Generic class:

+
+

A class is generic if the class declaration declares one or more type variables

+
+

Generic constructor:

+
+

A constructor is generic if it declares one or more type variable

+
+

Generic interface:

+
+

An interface is generic if the interface declaration declares one or more type
variables

+
+

Generic constructor:

+
+

A constructor is generic if it declares one or more type variables

+
+

Type arguments (TypeArguments)

Type identifier (TypeIdentifier)

Differences

A generic interface behaves much like a generic class, and a generic constructor behaves much like a generic method.
So it is enough to understand the difference between a generic class and a generic method.

+

Implementation

For how Java implements this, we can read the javac source directly.

+
/**
* {@literal
* TypeParametersOpt = ["<" TypeParameter {"," TypeParameter} ">"]
* }
*/
protected List<JCTypeParameter> typeParametersOpt() {
return typeParametersOpt(false);
}
+ +

/**
* {@literal
* TypeParameter = [Annotations] TypeVariable [TypeParameterBound]
* TypeParameterBound = EXTENDS Type {"&" Type}
* TypeVariable = Ident
* }
*/
JCTypeParameter typeParameter() {
int pos = token.pos;
List<JCAnnotation> annos = typeAnnotationsOpt();
Name name = typeName();
ListBuffer<JCExpression> bounds = new ListBuffer<>();
if (token.kind == EXTENDS) {
nextToken();
bounds.append(parseType());
while (token.kind == AMP) {
nextToken();
bounds.append(parseType());
}
}
return toP(F.at(pos).TypeParameter(name, bounds.toList(), annos));
}
+ +
Name typeName() {
int pos = token.pos;
Name name = ident();
Source source = restrictedTypeNameStartingAtSource(name, pos, true);
if (source != null) {
reportSyntaxError(pos, Errors.RestrictedTypeNotAllowed(name, source));
}
return name;
}
+ +
protected Name ident(boolean allowClass) {
if (token.kind == IDENTIFIER) {
Name name = token.name();
nextToken();
return name;
} else if (token.kind == ASSERT) {
log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.AssertAsIdentifier);
nextToken();
return names.error;
} else if (token.kind == ENUM) {
log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.EnumAsIdentifier);
nextToken();
return names.error;
} else if (token.kind == THIS) {
if (allowThisIdent) {
Name name = token.name();
nextToken();
return name;
} else {
log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.ThisAsIdentifier);
nextToken();
return names.error;
}
} else if (token.kind == UNDERSCORE) {
if (Feature.UNDERSCORE_IDENTIFIER.allowedInSource(source)) {
log.warning(token.pos, Warnings.UnderscoreAsIdentifier);
} else {
log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.UnderscoreAsIdentifier);
}
Name name = token.name();
nextToken();
return name;
} else {
accept(IDENTIFIER);
if (allowClass && token.kind == CLASS) {
nextToken();
return names._class;
}
return names.error;
}
}
+ +
main[1] stop in  com.sun.tools.javac.parser.Tokens$Token:315 
Deferring breakpoint com.sun.tools.javac.parser.Tokens$Token:315.
It will be set after the class is loaded.
main[1] cont
> Set deferred breakpoint com.sun.tools.javac.parser.Tokens$Token:315

Breakpoint hit: "thread=main", com.sun.tools.javac.parser.Tokens$Token.<init>(), line=315 bci=14
315 this.endPos = endPos;

main[1] list
311
312 Token(TokenKind kind, int pos, int endPos, List<Comment> comments) {
313 this.kind = kind;
314 this.pos = pos;
315 => this.endPos = endPos;
316 this.comments = comments;
317 checkKind();
318 }
319
320 Token[] split(Tokens tokens) {
main[1] where
[1] com.sun.tools.javac.parser.Tokens$Token.<init> (Tokens.java:315)
[2] com.sun.tools.javac.parser.Tokens.<clinit> (Tokens.java:457)
[3] com.sun.tools.javac.parser.ParserFactory.<init> (ParserFactory.java:79)
[4] com.sun.tools.javac.parser.ParserFactory.instance (ParserFactory.java:56)
[5] com.sun.tools.javac.main.JavaCompiler.<init> (JavaCompiler.java:386)
[6] com.sun.tools.javac.main.JavaCompiler.instance (JavaCompiler.java:115)
[7] com.sun.tools.javac.processing.JavacProcessingEnvironment.<init> (JavacProcessingEnvironment.java:215)
[8] com.sun.tools.javac.processing.JavacProcessingEnvironment.instance (JavacProcessingEnvironment.java:200)
[9] com.sun.tools.javac.api.BasicJavacTask.initPlugins (BasicJavacTask.java:217)
[10] com.sun.tools.javac.main.Main.compile (Main.java:292)
[11] com.sun.tools.javac.main.Main.compile (Main.java:176)
[12] com.sun.tools.javac.Main.compile (Main.java:64)
[13] com.sun.tools.javac.Main.main (Main.java:50)
+ + +

How to debug

Terminal 1:

+
##  compile Hello.java with javac
./java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 --module jdk.compiler/com.sun.tools.javac.Main com/Hello.java
+ +

Terminal 2:

+
### attach jdb
./jdb -attach 8000 -sourcepath /var/jdk/src/jdk.compiler/share/classes
### inside jdb, set a breakpoint on the main method
stop in com.sun.tools.javac.Main.main
### resume execution; the command is cont (or continue)
continue
+ + +

Code with a generic method

class PARA {
    <TT> void test(TT para) {
        System.out.println(para);
    }
}
+ + +

It matches this parse rule:

+
ClassBodyDeclaration =
";"
| [STATIC] Block
| ModifiersOpt
( Type Ident
( VariableDeclaratorsRest ";" | MethodDeclaratorRest )
| VOID Ident VoidMethodDeclaratorRest
| TypeParameters [Annotations]
( Type Ident MethodDeclaratorRest
| VOID Ident VoidMethodDeclaratorRest
)
| Ident ConstructorDeclaratorRest
| TypeParameters Ident ConstructorDeclaratorRest
| ClassOrInterfaceOrEnumDeclaration
* )
+ +

Stack trace

Breakpoint hit: "thread=main", com.sun.tools.javac.parser.JavacParser.ident(), line=575 bci=0
575 if (token.kind == IDENTIFIER) {

main[1] print token.name()
token.name() = "TT"
main[1] where
[1] com.sun.tools.javac.parser.JavacParser.ident (JavacParser.java:575)
[2] com.sun.tools.javac.parser.JavacParser.ident (JavacParser.java:571)
[3] com.sun.tools.javac.parser.JavacParser.typeName (JavacParser.java:3,979)
[4] com.sun.tools.javac.parser.JavacParser.typeParameter (JavacParser.java:4,563)
[5] com.sun.tools.javac.parser.JavacParser.typeParametersOpt (JavacParser.java:4,541)
[6] com.sun.tools.javac.parser.JavacParser.classOrInterfaceOrRecordBodyDeclaration (JavacParser.java:4,277)
[7] com.sun.tools.javac.parser.JavacParser.classInterfaceOrRecordBody (JavacParser.java:4,214)
[8] com.sun.tools.javac.parser.JavacParser.classDeclaration (JavacParser.java:3,925)
[9] com.sun.tools.javac.parser.JavacParser.classOrRecordOrInterfaceOrEnumDeclaration (JavacParser.java:3,866)
[10] com.sun.tools.javac.parser.JavacParser.typeDeclaration (JavacParser.java:3,855)
[11] com.sun.tools.javac.parser.JavacParser.parseCompilationUnit (JavacParser.java:3,699)
[12] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:620)
[13] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:657)
[14] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:1,006)
[15] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:993)
[16] com.sun.tools.javac.main.JavaCompiler.compile (JavaCompiler.java:919)
[17] com.sun.tools.javac.main.Main.compile (Main.java:317)
[18] com.sun.tools.javac.main.Main.compile (Main.java:176)
[19] com.sun.tools.javac.Main.compile (Main.java:64)
[20] com.sun.tools.javac.Main.main (Main.java:50)
+ +
main[1] print kind 
kind = "token.identifier"
main[1] print pos
pos = 13
main[1] list
311
312 Token(TokenKind kind, int pos, int endPos, List<Comment> comments) {
313 this.kind = kind;
314 this.pos = pos;
315 => this.endPos = endPos;
316 this.comments = comments;
317 checkKind();
318 }
319
320 Token[] split(Tokens tokens) {
main[1] where
[1] com.sun.tools.javac.parser.Tokens$Token.<init> (Tokens.java:315)
[2] com.sun.tools.javac.parser.Tokens$NamedToken.<init> (Tokens.java:399)
[3] com.sun.tools.javac.parser.JavaTokenizer.readToken (JavaTokenizer.java:1,046)
[4] com.sun.tools.javac.parser.Scanner.nextToken (Scanner.java:115)
[5] com.sun.tools.javac.parser.JavacParser.nextToken (JavacParser.java:275)
[6] com.sun.tools.javac.parser.JavacParser.typeParametersOpt (JavacParser.java:4,540)
[7] com.sun.tools.javac.parser.JavacParser.classOrInterfaceOrRecordBodyDeclaration (JavacParser.java:4,277)
[8] com.sun.tools.javac.parser.JavacParser.classInterfaceOrRecordBody (JavacParser.java:4,214)
[9] com.sun.tools.javac.parser.JavacParser.classDeclaration (JavacParser.java:3,925)
[10] com.sun.tools.javac.parser.JavacParser.classOrRecordOrInterfaceOrEnumDeclaration (JavacParser.java:3,866)
[11] com.sun.tools.javac.parser.JavacParser.typeDeclaration (JavacParser.java:3,855)
[12] com.sun.tools.javac.parser.JavacParser.parseCompilationUnit (JavacParser.java:3,699)
[13] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:620)
[14] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:657)
[15] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:1,006)
[16] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:993)
[17] com.sun.tools.javac.main.JavaCompiler.compile (JavaCompiler.java:919)
[18] com.sun.tools.javac.main.Main.compile (Main.java:317)
[19] com.sun.tools.javac.main.Main.compile (Main.java:176)
[20] com.sun.tools.javac.Main.compile (Main.java:64)
[21] com.sun.tools.javac.Main.main (Main.java:50)
+ +

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/08/04/java-nio/index.html b/2023/08/04/java-nio/index.html new file mode 100644 index 0000000000..e33a1bc9b6 --- /dev/null +++ b/2023/08/04/java-nio/index.html @@ -0,0 +1,467 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java nio | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java nio +

+ + +
+ + + + +
+ + +

Background

Getting to know Java NIO. I ran into MappedByteBuffer while reading Lucene, so I wanted to understand NIO.
NIO mainly consists of three parts:

+
  • Buffer
  • Selector
  • Channel

Buffer

The main ByteBuffer implementations (a short sketch of how each is obtained follows the list):

  • MappedByteBuffer
  • HeapByteBuffer
  • DirectByteBuffer
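A quick sketch of how each kind is typically obtained; the file name is a placeholder:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class BufferKindsDemo {
    public static void main(String[] args) throws IOException {
        ByteBuffer heap = ByteBuffer.allocate(16);         // HeapByteBuffer: backed by a byte[] on the Java heap
        ByteBuffer direct = ByteBuffer.allocateDirect(16); // DirectByteBuffer: off-heap native memory
        try (FileChannel fc = FileChannel.open(Paths.get("demo.bin"),
                StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            MappedByteBuffer mapped = fc.map(FileChannel.MapMode.READ_WRITE, 0, 16); // mmap-backed
            mapped.put(0, (byte) 'a');
        }
        System.out.println(heap.isDirect() + " " + direct.isDirect()); // false true
    }
}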

Example

A simple example:

+
@Test
public void fileChannel() throws IOException {
    try (FileChannel fc = FileChannel.open(Paths.get("ccc.cc"),
            StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE)) {
        MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_WRITE, 0, 1);
        byte b = 97;
        bb.put(0, b);
        fc.write(bb);
        var charset = Charset.defaultCharset();
        System.out.println("res" + charset.decode(bb).toString());
    }
}
+

Under the hood this ends up calling:

+
@ForceInline
public void put$Type$(Scope scope, Object base, long offset, $type$ value) {
try {
put$Type$Internal(scope, base, offset, value);
} catch (Scope.ScopedAccessError ex) {
throw new IllegalStateException("This segment is already closed");
}
}

@ForceInline @Scoped
private void put$Type$Internal(Scope scope, Object base, long offset, $type$ value) {
try {
if (scope != null) {
scope.checkValidState();
}
UNSAFE.put$Type$(base, offset, value);
} finally {
Reference.reachabilityFence(scope);
}
}
+ +

and the write finally lands here:

+
void put(T x) {
GuardUnsafeAccess guard(_thread);
*addr() = normalize_for_write(x);
}
+ + +

The whole flow is:
mmap returns a pointer, and MappedByteBuffer uses UNSAFE.put to modify that off-heap memory directly, without going through the Java heap.

+

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/08/10/jdbc-Communications-link-failure/index.html b/2023/08/10/jdbc-Communications-link-failure/index.html new file mode 100644 index 0000000000..dff8690e51 --- /dev/null +++ b/2023/08/10/jdbc-Communications-link-failure/index.html @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + jdbc Communications link failure | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ jdbc Communications link failure +

+ + +
+ + + + +
+ + +

Background

Connecting from my local machine to the test-environment MySQL failed with the error Communications link failure.

+

JDBC could not reach the test environment. It turned out to be a network restriction: external access to the test-environment MySQL was not allowed. After asking ops to change the rule, the connection worked.
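Before blaming JDBC, a plain TCP probe is a quick way to tell a network block from a database-level problem; a minimal sketch, where the host and port are placeholders for the test-environment MySQL:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class ReachabilityCheck {
    public static void main(String[] args) {
        String host = "test-mysql.example.com"; // placeholder
        int port = 3306;
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, port), 3_000); // 3 second timeout
            System.out.println("TCP connection OK, so the problem is not a network block");
        } catch (IOException e) {
            // A "Connection timed out" here matches the root cause in the stack trace below:
            // the driver never receives a packet because the network path is blocked.
            System.out.println("Cannot reach " + host + ":" + port + " - " + e.getMessage());
        }
    }
}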

+

Stack trace

[ERROR] 2023-08-10 15:01:16:468 [ip:] [TID: N/A] [main] [com.alibaba.druid.pool.DruidDataSource:916] [init] => init datasource error, url: jdbc:mysql://xxx.com/ods?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=UTC&allowMultiQueries=true
com.mysql.cj.jdbc.exceptions.CommunicationsException: Communications link failure

The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
at com.mysql.cj.jdbc.exceptions.SQLError.createCommunicationsException(SQLError.java:174)
at com.mysql.cj.jdbc.exceptions.SQLExceptionsMapping.translateException(SQLExceptionsMapping.java:64)
at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:835)
at com.mysql.cj.jdbc.ConnectionImpl.<init>(ConnectionImpl.java:455)
at com.mysql.cj.jdbc.ConnectionImpl.getInstance(ConnectionImpl.java:240)
at com.mysql.cj.jdbc.NonRegisteringDriver.connect(NonRegisteringDriver.java:199)
at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:156)
at com.alibaba.druid.filter.stat.StatFilter.connection_connect(StatFilter.java:218)
at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:150)
at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1646)
at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1710)
at com.alibaba.druid.pool.DruidDataSource.init(DruidDataSource.java:912)
at com.baomidou.dynamic.datasource.creator.DruidDataSourceCreator.doCreateDataSource(DruidDataSourceCreator.java:83)
at com.baomidou.dynamic.datasource.creator.AbstractDataSourceCreator.createDataSource(AbstractDataSourceCreator.java:70)
at com.baomidou.dynamic.datasource.creator.DefaultDataSourceCreator.createDataSource(DefaultDataSourceCreator.java:48)
at com.baomidou.dynamic.datasource.provider.AbstractDataSourceProvider.createDataSourceMap(AbstractDataSourceProvider.java:47)
at com.baomidou.dynamic.datasource.provider.YmlDynamicDataSourceProvider.loadDataSources(YmlDynamicDataSourceProvider.java:42)
at com.baomidou.dynamic.datasource.DynamicRoutingDataSource.afterPropertiesSet(DynamicRoutingDataSource.java:219)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1858)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1795)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:594)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:886)
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:790)
at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:540)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1341)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1181)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:556)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireByType(AbstractAutowireCapableBeanFactory.java:1514)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1409)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:897)
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:879)
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:551)
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:755)
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747)
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:402)
at org.springframework.boot.SpringApplication.run(SpringApplication.java:312)
at org.springframework.boot.test.context.SpringBootContextLoader.loadContext(SpringBootContextLoader.java:120)
at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContextInternal(DefaultCacheAwareContextLoaderDelegate.java:99)
at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:124)
at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:123)
at org.springframework.test.context.web.ServletTestExecutionListener.setUpRequestContextIfNecessary(ServletTestExecutionListener.java:190)
at org.springframework.test.context.web.ServletTestExecutionListener.prepareTestInstance(ServletTestExecutionListener.java:132)
at org.springframework.test.context.TestContextManager.prepareTestInstance(TestContextManager.java:244)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.createTest(SpringJUnit4ClassRunner.java:227)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner$1.runReflectiveCall(SpringJUnit4ClassRunner.java:289)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.methodBlock(SpringJUnit4ClassRunner.java:291)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:246)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:97)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.springframework.test.context.junit4.statements.RunBeforeTestClassCallbacks.evaluate(RunBeforeTestClassCallbacks.java:61)
at org.springframework.test.context.junit4.statements.RunAfterTestClassCallbacks.evaluate(RunAfterTestClassCallbacks.java:70)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.run(SpringJUnit4ClassRunner.java:190)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38)
at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11)
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35)
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:232)
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:55)
Caused by: com.mysql.cj.exceptions.CJCommunicationsException: Communications link failure

The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:61)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:105)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:151)
at com.mysql.cj.exceptions.ExceptionFactory.createCommunicationsException(ExceptionFactory.java:167)
at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:91)
at com.mysql.cj.NativeSession.connect(NativeSession.java:152)
at com.mysql.cj.jdbc.ConnectionImpl.connectOneTryOnly(ConnectionImpl.java:955)
at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:825)
... 127 common frames omitted
Caused by: java.net.ConnectException: Connection timed out: connect
at java.base/java.net.PlainSocketImpl.waitForConnect(Native Method)
at java.base/java.net.PlainSocketImpl.socketConnect(PlainSocketImpl.java:107)
at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)
at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)
at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)
at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403)
at java.base/java.net.Socket.connect(Socket.java:591)
at com.mysql.cj.protocol.StandardSocketFactory.connect(StandardSocketFactory.java:155)
at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:65)
... 130 common frames omitted
[ERROR] 2023-08-10 15:01:16:472 [ip:] [TID: N/A] [main] [com.alibaba.druid.pool.DruidDataSource:958] [init] => {dataSource-2} init error
com.mysql.cj.jdbc.exceptions.CommunicationsException: Communications link failure

The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
at com.mysql.cj.jdbc.exceptions.SQLError.createCommunicationsException(SQLError.java:174)
at com.mysql.cj.jdbc.exceptions.SQLExceptionsMapping.translateException(SQLExceptionsMapping.java:64)
at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:835)
at com.mysql.cj.jdbc.ConnectionImpl.<init>(ConnectionImpl.java:455)
at com.mysql.cj.jdbc.ConnectionImpl.getInstance(ConnectionImpl.java:240)
at com.mysql.cj.jdbc.NonRegisteringDriver.connect(NonRegisteringDriver.java:199)
at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:156)
at com.alibaba.druid.filter.stat.StatFilter.connection_connect(StatFilter.java:218)
at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:150)
at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1646)
at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1710)
at com.alibaba.druid.pool.DruidDataSource.init(DruidDataSource.java:912)
at com.baomidou.dynamic.datasource.creator.DruidDataSourceCreator.doCreateDataSource(DruidDataSourceCreator.java:83)
at com.baomidou.dynamic.datasource.creator.AbstractDataSourceCreator.createDataSource(AbstractDataSourceCreator.java:70)
at com.baomidou.dynamic.datasource.creator.DefaultDataSourceCreator.createDataSource(DefaultDataSourceCreator.java:48)
at com.baomidou.dynamic.datasource.provider.AbstractDataSourceProvider.createDataSourceMap(AbstractDataSourceProvider.java:47)
at com.baomidou.dynamic.datasource.provider.YmlDynamicDataSourceProvider.loadDataSources(YmlDynamicDataSourceProvider.java:42)
at com.baomidou.dynamic.datasource.DynamicRoutingDataSource.afterPropertiesSet(DynamicRoutingDataSource.java:219)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1858)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1795)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:594)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:886)
at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:790)
at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:540)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1341)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1181)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:556)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireByType(AbstractAutowireCapableBeanFactory.java:1514)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1409)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:897)
at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:879)
at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:551)
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:755)
at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747)
at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:402)
at org.springframework.boot.SpringApplication.run(SpringApplication.java:312)
at org.springframework.boot.test.context.SpringBootContextLoader.loadContext(SpringBootContextLoader.java:120)
at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContextInternal(DefaultCacheAwareContextLoaderDelegate.java:99)
at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:124)
at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:123)
at org.springframework.test.context.web.ServletTestExecutionListener.setUpRequestContextIfNecessary(ServletTestExecutionListener.java:190)
at org.springframework.test.context.web.ServletTestExecutionListener.prepareTestInstance(ServletTestExecutionListener.java:132)
at org.springframework.test.context.TestContextManager.prepareTestInstance(TestContextManager.java:244)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.createTest(SpringJUnit4ClassRunner.java:227)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner$1.runReflectiveCall(SpringJUnit4ClassRunner.java:289)
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.methodBlock(SpringJUnit4ClassRunner.java:291)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:246)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:97)
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
at org.springframework.test.context.junit4.statements.RunBeforeTestClassCallbacks.evaluate(RunBeforeTestClassCallbacks.java:61)
at org.springframework.test.context.junit4.statements.RunAfterTestClassCallbacks.evaluate(RunAfterTestClassCallbacks.java:70)
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.run(SpringJUnit4ClassRunner.java:190)
at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38)
at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11)
at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35)
at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:232)
at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:55)
Caused by: com.mysql.cj.exceptions.CJCommunicationsException: Communications link failure

The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:61)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:105)
at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:151)
at com.mysql.cj.exceptions.ExceptionFactory.createCommunicationsException(ExceptionFactory.java:167)
at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:91)
at com.mysql.cj.NativeSession.connect(NativeSession.java:152)
at com.mysql.cj.jdbc.ConnectionImpl.connectOneTryOnly(ConnectionImpl.java:955)
at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:825)
... 127 common frames omitted
Caused by: java.net.ConnectException: Connection timed out: connect
at java.base/java.net.PlainSocketImpl.waitForConnect(Native Method)
at java.base/java.net.PlainSocketImpl.socketConnect(PlainSocketImpl.java:107)
at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)
at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)
at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)
at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403)
at java.base/java.net.Socket.connect(Socket.java:591)
at com.mysql.cj.protocol.StandardSocketFactory.connect(StandardSocketFactory.java:155)
at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:65)
... 130 common frames omitted

diff --git a/2023/08/15/java-juc/index.html b/2023/08/15/java-juc/index.html
new file mode 100644
index 0000000000..f8feb5c311
--- /dev/null
+++ b/2023/08/15/java-juc/index.html
@@ -0,0 +1,504 @@

+ java juc

Background

Get an overview of the Java container classes.

Main juc classes

Collection interface:

Class    Description
List     an ordered list
Queue    a queue
Set      a set

Map and other common implementations:

Class           Description                         Thread-safe   Allows null?
HashMap         hash map                            no            yes, null keys and values are permitted
PriorityQueue   priority queue, implements Queue    no            no: elements must be ordered, adding null throws an exception
HashSet         a Set implementation                no            "This class permits the null element."
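As a quick sanity check of the null behaviour claimed in the table above, here is a small self-contained sketch (the class name is made up for illustration):

import java.util.HashSet;
import java.util.PriorityQueue;
import java.util.Set;

public class NullElementDemo {
    public static void main(String[] args) {
        Set<String> set = new HashSet<>();
        set.add(null);                          // HashSet permits the null element
        System.out.println(set.contains(null)); // true

        PriorityQueue<String> pq = new PriorityQueue<>();
        try {
            pq.add(null);                       // elements must be ordered, so null is rejected
        } catch (NullPointerException e) {
            System.out.println("PriorityQueue throws NPE on null");
        }
    }
}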

AQS

AQS (AbstractQueuedSynchronizer) mainly provides three things:

  • state: an int that represents the resource, kept in a volatile int
  • queue: a wait queue (a plain FIFO queue) that holds the blocked Thread objects
  • block: when the resource cannot be acquired, the thread blocks using park/unpark, which are implemented via JNI
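A minimal sketch of how these three pieces fit together, built directly on AbstractQueuedSynchronizer (the class and method names here are illustrative, not from the original post): state is the volatile int, the internal queue holds the waiting threads, and acquire()/release() park and unpark them.

import java.util.concurrent.locks.AbstractQueuedSynchronizer;

public class SimpleLock {
    private static class Sync extends AbstractQueuedSynchronizer {
        @Override
        protected boolean tryAcquire(int arg) {
            return compareAndSetState(0, 1);   // CAS the volatile int state from 0 to 1
        }
        @Override
        protected boolean tryRelease(int arg) {
            setState(0);                       // give the resource back
            return true;
        }
    }

    private final Sync sync = new Sync();

    public void lock()   { sync.acquire(1); }  // parks the calling thread if the CAS fails
    public void unlock() { sync.release(1); }  // unparks the next queued thread
}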

block

park and unpark are implemented on top of pthread_cond_wait and pthread_cond_signal.
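On the Java side this pair is exposed through LockSupport; a tiny usage sketch (names are illustrative):

import java.util.concurrent.locks.LockSupport;

public class ParkDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            System.out.println("worker: parking");
            LockSupport.park();              // blocks until a permit is available
            System.out.println("worker: unparked");
        });
        worker.start();

        Thread.sleep(500);
        LockSupport.unpark(worker);          // hands the worker a permit, waking it up
        worker.join();
    }
}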

Analysis of the park source code

// Parker::park decrements count if > 0, else does a condvar wait.  Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

void Parker::park(bool isAbsolute, jlong time) {

// Optional fast-path check:
// Return immediately if a permit is available.
// We depend on Atomic::xchg() having full barrier semantics
// since we are doing a lock-free update to _counter.
if (Atomic::xchg(&_counter, 0) > 0) return;

JavaThread *jt = JavaThread::current();

// Optional optimization -- avoid state transitions if there's
// an interrupt pending.
if (jt->is_interrupted(false)) {
return;
}

// Next, demultiplex/decode time arguments
struct timespec absTime;
if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
return;
}
if (time > 0) {
to_abstime(&absTime, time, isAbsolute, false);
}

// Enter safepoint region
// Beware of deadlocks such as 6317397.
// The per-thread Parker:: mutex is a classic leaf-lock.
// In particular a thread must never block on the Threads_lock while
// holding the Parker:: mutex. If safepoints are pending both the
// the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
ThreadBlockInVM tbivm(jt);

// Can't access interrupt state now that we are _thread_blocked. If we've
// been interrupted since we checked above then _counter will be > 0.

// Don't wait if cannot get lock since interference arises from
// unparking.
if (pthread_mutex_trylock(_mutex) != 0) {
return;
}

int status;
if (_counter > 0) { // no wait needed
_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
return;
}

OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

assert(_cur_index == -1, "invariant");
if (time == 0) {
_cur_index = REL_INDEX; // arbitrary choice when not timed
status = pthread_cond_wait(&_cond[_cur_index], _mutex);
assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
status, "cond_wait");
}
else {
_cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
assert_status(status == 0 || status == ETIMEDOUT,
status, "cond_timedwait");
}
_cur_index = -1;

_counter = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "invariant");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other and Java-level accesses.
OrderAccess::fence();
}
Related reading

diff --git a/2023/08/15/mybatisplus-Column-status-cannot-be-null/index.html b/2023/08/15/mybatisplus-Column-status-cannot-be-null/index.html
new file mode 100644
index 0000000000..af16b5f8cd
--- /dev/null
+++ b/2023/08/15/mybatisplus-Column-status-cannot-be-null/index.html
@@ -0,0 +1,455 @@

+ mybatisplus Column 'xxx' cannot be null

Background

Given the table below: created_at is filled in automatically by MySQL, but when saving rows with MyBatis-Plus saveBatch the insert fails with
Column 'created_at' cannot be null
CREATE TABLE `table` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id',
`status` VARCHAR(100) NOT NULL COMMENT '状态',
`created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
`updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '更新时间',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
+ +
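For reference, a hedged sketch of the call that produced the failing statement, assuming a standard MyBatis-Plus service layer (TableService and TableMapper are hypothetical names): saveBatch builds the INSERT column list from the entity fields, which is why created_at and updated_at ended up in the SQL.

import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import org.springframework.stereotype.Service;

import java.util.List;

@Service
public class TableService extends ServiceImpl<TableMapper, TableDO> {
    public void saveAll(List<TableDO> rows) {
        // was generating: insert into table (status, created_at, updated_at) values (?, null, null)
        saveBatch(rows);
    }
}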

The SQL generated by MyBatis-Plus apparently treats created_at as a regular column and splices it into the statement:

+
insert into table  (status , created_at,updated_at) values (1 , null ,null) 
+

In other words, the null values were not filtered out.

+

Investigating the cause

At first I went through the related documentation and added the annotation below:
@TableField(insertStrategy = FieldStrategy.IGNORED)

+
@Data
@TableName("table")
public class TableDO {
    /**
     * auto-increment id
     */
    @TableId(value = "id", type = IdType.AUTO)
    private Integer id;

    /**
     * status: WRITE_TO_DB / RESUME_FROM_DB
     */
    private String status;

    /**
     * delayed send time
     */
    private Date delayAt;

    /**
     * creation time
     */
    @TableField(insertStrategy = FieldStrategy.IGNORED) // did not take effect
    private Date createdAt;

    /**
     * update time
     */
    private Date updatedAt;

}

+

The annotation did not take effect. I then kept tweaking @TableName as well, and even that annotation had no effect, which showed that none of the MyBatis-Plus annotations were being applied at all.

+

Troubleshooting

Following up on the symptoms above, I found that an XML mapper auto-generated by IDEA took precedence and contained a resultMap. After clearing that XML file out completely, the insert worked again and the final SQL became:

+
insert into table  (status ) values (1 ) 
+ +

Related reading


diff --git a/2023/08/16/WARNING-An-illegal-reflective-access-operation-has-occurred-groovy/index.html b/2023/08/16/WARNING-An-illegal-reflective-access-operation-has-occurred-groovy/index.html
new file mode 100644
index 0000000000..b1f23d1707
--- /dev/null
+++ b/2023/08/16/WARNING-An-illegal-reflective-access-operation-has-occurred-groovy/index.html
@@ -0,0 +1,445 @@

+ WARNING: An illegal reflective access operation has occurred groovy

Background

On JDK 11 the following warning is printed:
WARNING: An illegal reflective access operation has occurred
WARNING: Illegal reflective access by org.codehaus.groovy.reflection.CachedClass (file:/D:/packageFile/org/codehaus/groovy/groovy/2.5.14/groovy-2.5.14.jar) to method java.lang.Object.finalize()
WARNING: Please consider reporting this to the maintainers of org.codehaus.groovy.reflection.CachedClass
WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
WARNING: All illegal access operations will be denied in a future release
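The warning is not fatal on JDK 9 through 15: it is printed the first time some code does deep reflection into a JDK class that has not been opened to it, which is exactly what Groovy's CachedClass does with Object.finalize(). A minimal sketch that reproduces the same kind of warning (the class name is made up; on JDK 16+ the same call fails with InaccessibleObjectException unless --add-opens is passed):

import java.lang.reflect.Method;

public class IllegalAccessDemo {
    public static void main(String[] args) throws Exception {
        Method finalize = Object.class.getDeclaredMethod("finalize");
        finalize.setAccessible(true);   // deep reflection into java.base -> triggers the warning on JDK 9-15
        System.out.println("made finalize accessible: " + finalize.canAccess(new Object()));
    }
}

One common workaround, hedged as a general note rather than the post's own fix, is to open the package explicitly, e.g. --add-opens java.base/java.lang=ALL-UNNAMED, or to upgrade Groovy to a version that no longer needs the access.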

diff --git "a/2023/08/16/antlr-\344\275\277\347\224\250/index.html" "b/2023/08/16/antlr-\344\275\277\347\224\250/index.html"
new file mode 100644
index 0000000000..8551be96cd
--- /dev/null
+++ "b/2023/08/16/antlr-\344\275\277\347\224\250/index.html"
@@ -0,0 +1,451 @@

+ antlr usage


Background

ANTLR is a lexer/parser generator, similar to bison/yacc for C.

Grammar

Token names always start with a capital letter and so do lexer rules as defined by Java’s Character.isUpperCase method. Parser rule names always start with a lowercase letter (those that fail Character.isUpperCase). The initial character can be followed by uppercase and lowercase letters, digits, and underscores. Here are some sample names:

+
+
ID, LPAREN, RIGHT_CURLY // token names/lexer rules
expr, simpleDeclarator, d2, header_file // parser rule names
+ +

Names that start with an uppercase letter are token names or lexer rule names; names that start with a lowercase letter are parser rule names.

+

Related reading


diff --git a/2023/08/24/java-main/index.html b/2023/08/24/java-main/index.html
new file mode 100644
index 0000000000..1e242c8cce
--- /dev/null
+++ b/2023/08/24/java-main/index.html
@@ -0,0 +1,482 @@

+ java main


Introduction to the main method

Java's main method is the entry point of a program and usually has this signature:
public static void main(String[] argv){

}
+ +

So how does this main method get loaded and invoked?

+
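The gdb session below assumes a trivial test class; judging from the method holder 'Hello' in the output, something like this (reconstructed for illustration, not taken from the original post):

public class Hello {
    public static void main(String[] args) {
        System.out.println("hello");
    }
}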

When it is called

The call goes through jni_invoke_static:

+
(gdb) p method._value->print()
{method}
- this oop: 0x00007fffb44112d8
- method holder: 'Hello'
- constants: 0x00007fffb4411030 constant pool [34] {0x00007fffb4411030} for 'Hello' cache=0x00007fffb44113e0
- access: 0x9 public static
- name: 'main'
- signature: '([Ljava/lang/String;)V'
- max stack: 3
- max locals: 1
- size of params: 1
- method size: 13
- vtable index: -2
- i2i entry: 0x00007fffe100dc00 /////////// entry_point
- adapters: AHE@0x00007ffff01015d0: 0xb i2c: 0x00007fffe1114d60 c2i: 0x00007fffe1114e1a c2iUV: 0x00007fffe1114de4 c2iNCI: 0x00007fffe1114e57
- compiled entry 0x00007fffe1114e1a
- code size: 13
- code start: 0x00007fffb44112c0 // start of the bytecode
- code end (excl): 0x00007fffb44112cd // end of the bytecode
- checked ex length: 0
- linenumber start: 0x00007fffb44112cd
- localvar length: 0
$7 = void

+
(gdb) info registers 
rax 0x7ffff59fe940 140737314285888
rbx 0x7fffe1000c9e 140736968264862
rcx 0x7fffb44112d8 140736217551576
rdx 0xa 10
rsi 0x7ffff59febf8 140737314286584
rdi 0x7ffff59fe940 140737314285888
rbp 0x7ffff59fe870 0x7ffff59fe870
rsp 0x7ffff59fe810 0x7ffff59fe810
r8 0x7fffe100dc00 140736968317952 // this is the entry point
r9 0x7ffff59feaf0 140737314286320
r10 0x7ffff053ae20 140737225403936
r11 0x7ffff0000090 140737219920016
r12 0x1 1
r13 0x0 0
r14 0x7ffff7c94850 140737350551632
r15 0x7fffffffa800 140737488332800
rip 0x7fffe1000ca6 0x7fffe1000ca6
eflags 0x202 [ IF ]
cs 0x33 51
ss 0x2b 43
ds 0x0 0
es 0x0 0
fs 0x0 0
gs 0x0 0
+ +
$2 = void
(gdb) where
#0 JavaCalls::call_helper (result=0x7ffff7bfec10, method=..., args=0x7ffff7bfeb30, __the_thread__=0x7ffff00295a0) at /home/ubuntu/jdk/src/hotspot/share/runtime/javaCalls.cpp:333
#1 0x00007ffff6799785 in jni_invoke_static (result=result@entry=0x7ffff7bfec10, method_id=method_id@entry=0x7ffff02c84d0, args=args@entry=0x7ffff7bfec80, __the_thread__=__the_thread__@entry=0x7ffff00295a0, env=0x7ffff00298d0,
call_type=JNI_STATIC, receiver=0x0) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:889
#2 0x00007ffff679cd19 in jni_CallStaticVoidMethod (env=0x7ffff00298d0, cls=<optimized out>, methodID=0x7ffff02c84d0) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:1713
#3 0x00007ffff7fadcb5 in JavaMain (_args=<optimized out>) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:547
#4 0x00007ffff7fb0f4d in ThreadJavaMain (args=<optimized out>) at /home/ubuntu/jdk/src/java.base/unix/native/libjli/java_md.c:651
#5 0x00007ffff7c94b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#6 0x00007ffff7d26a00 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
(gdb
+ + +

How C++ calls a Java static method

The call goes through generate_call_stub, which emits the call stub used as the entry from C++; that stub then calls the method's entry_point.

+
address StubGenerator::generate_call_stub(address& return_address) {

assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
(int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
"adjust this code");
StubCodeMark mark(this, "StubRoutines", "call_stub");
address start = __ pc();

// same as in generate_catch_exception()!
const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

const Address call_wrapper (rbp, call_wrapper_off * wordSize);
const Address result (rbp, result_off * wordSize);
const Address result_type (rbp, result_type_off * wordSize);
const Address method (rbp, method_off * wordSize);
const Address entry_point (rbp, entry_point_off * wordSize);
const Address parameters (rbp, parameters_off * wordSize);
const Address parameter_size(rbp, parameter_size_off * wordSize);

// same as in generate_catch_exception()!
const Address thread (rbp, thread_off * wordSize);

const Address r15_save(rbp, r15_off * wordSize);
const Address r14_save(rbp, r14_off * wordSize);
const Address r13_save(rbp, r13_off * wordSize);
const Address r12_save(rbp, r12_off * wordSize);
const Address rbx_save(rbp, rbx_off * wordSize);

// stub code
__ enter();
__ subptr(rsp, -rsp_after_call_off * wordSize);

// save register parameters
#ifndef _WIN64
__ movptr(parameters, c_rarg5); // parameters
__ movptr(entry_point, c_rarg4); // entry_point
#endif

__ movptr(method, c_rarg3); // method
__ movl(result_type, c_rarg2); // result type
__ movptr(result, c_rarg1); // result
__ movptr(call_wrapper, c_rarg0); // call wrapper

// save regs belonging to calling function
__ movptr(rbx_save, rbx);
__ movptr(r12_save, r12);
__ movptr(r13_save, r13);
__ movptr(r14_save, r14);
__ movptr(r15_save, r15);

#ifdef _WIN64
int last_reg = 15;
if (UseAVX > 2) {
last_reg = 31;
}
if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
}
} else {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ movdqu(xmm_save(i), as_XMMRegister(i));
}
}

const Address rdi_save(rbp, rdi_off * wordSize);
const Address rsi_save(rbp, rsi_off * wordSize);

__ movptr(rsi_save, rsi);
__ movptr(rdi_save, rdi);
#else
const Address mxcsr_save(rbp, mxcsr_off * wordSize);
{
Label skip_ldmx;
__ stmxcsr(mxcsr_save);
__ movl(rax, mxcsr_save);
__ andl(rax, 0xFFC0); // Mask out any pending exceptions (only check control and mask bits)
ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
__ cmp32(rax, mxcsr_std, rscratch1);
__ jcc(Assembler::equal, skip_ldmx);
__ ldmxcsr(mxcsr_std, rscratch1);
__ bind(skip_ldmx);
}
#endif

// Load up thread register
__ movptr(r15_thread, thread);
__ reinit_heapbase();

#ifdef ASSERT
// make sure we have no pending exceptions
{
Label L;
__ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("StubRoutines::call_stub: entered with pending exception");
__ bind(L);
}
#endif

// pass parameters if any
BLOCK_COMMENT("pass parameters if any");
Label parameters_done;
__ movl(c_rarg3, parameter_size);
__ testl(c_rarg3, c_rarg3);
__ jcc(Assembler::zero, parameters_done);

Label loop;
__ movptr(c_rarg2, parameters); // parameter pointer
__ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
__ BIND(loop);
__ movptr(rax, Address(c_rarg2, 0));// get parameter
__ addptr(c_rarg2, wordSize); // advance to next parameter
__ decrementl(c_rarg1); // decrement counter
__ push(rax); // pass parameter
__ jcc(Assembler::notZero, loop);

// call Java function
__ BIND(parameters_done);
__ movptr(rbx, method); // get Method*
__ movptr(c_rarg1, entry_point); // get entry_point
__ mov(r13, rsp); // set sender sp
BLOCK_COMMENT("call Java function");
__ call(c_rarg1);

BLOCK_COMMENT("call_stub_return_address:");
return_address = __ pc();

// store result depending on type (everything that is not
// T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
__ movptr(c_rarg0, result);
Label is_long, is_float, is_double, exit;
__ movl(c_rarg1, result_type);
__ cmpl(c_rarg1, T_OBJECT);
__ jcc(Assembler::equal, is_long);
__ cmpl(c_rarg1, T_LONG);
__ jcc(Assembler::equal, is_long);
__ cmpl(c_rarg1, T_FLOAT);
__ jcc(Assembler::equal, is_float);
__ cmpl(c_rarg1, T_DOUBLE);
__ jcc(Assembler::equal, is_double);

// handle T_INT case
__ movl(Address(c_rarg0, 0), rax);

__ BIND(exit);

// pop parameters
__ lea(rsp, rsp_after_call);

#ifdef ASSERT
// verify that threads correspond
{
Label L1, L2, L3;
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L1);
__ stop("StubRoutines::call_stub: r15_thread is corrupted");
__ bind(L1);
__ get_thread(rbx);
__ cmpptr(r15_thread, thread);
__ jcc(Assembler::equal, L2);
__ stop("StubRoutines::call_stub: r15_thread is modified by call");
__ bind(L2);
__ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L3);
__ stop("StubRoutines::call_stub: threads must correspond");
__ bind(L3);
}
#endif

__ pop_cont_fastpath();

// restore regs belonging to calling function
#ifdef _WIN64
// emit the restores for xmm regs
if (VM_Version::supports_evex()) {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
}
} else {
for (int i = xmm_save_first; i <= last_reg; i++) {
__ movdqu(as_XMMRegister(i), xmm_save(i));
}
}
#endif
__ movptr(r15, r15_save);
__ movptr(r14, r14_save);
__ movptr(r13, r13_save);
__ movptr(r12, r12_save);
__ movptr(rbx, rbx_save);

#ifdef _WIN64
__ movptr(rdi, rdi_save);
__ movptr(rsi, rsi_save);
#else
__ ldmxcsr(mxcsr_save);
#endif

// restore rsp
__ addptr(rsp, -rsp_after_call_off * wordSize);

// return
__ vzeroupper();
__ pop(rbp);
__ ret(0);

// handle return types different from T_INT
__ BIND(is_long);
__ movq(Address(c_rarg0, 0), rax);
__ jmp(exit);

__ BIND(is_float);
__ movflt(Address(c_rarg0, 0), xmm0);
__ jmp(exit);

__ BIND(is_double);
__ movdbl(Address(c_rarg0, 0), xmm0);
__ jmp(exit);

return start;
}
+

How to list the assembly instructions

// list 100 instructions starting at 0x7fffe1000ca6
x/100i 0x7fffe1000ca6
+ +

How to set a gdb breakpoint at an address

(gdb) b *0x7fffe1000ca6

+ + +

Method entry point

JavaCalls::call_helper
-----> address entry_point = method->from_interpreted_entry();
---------> Atomic::load_acquire(&_from_interpreted_entry)
+ +

When a method is executed, the VM jumps to _from_interpreted_entry, which builds the method's frame and context. Register r13 points at the next bytecode; the interpreter reads the next bytecode through r13, looks up its template routine, and executes it.

+

So where can _from_interpreted_entry be set?
It is set in Method::link_method:

+
void Method::link_method(const methodHandle& h_method, TRAPS) {
...
address entry = Interpreter::entry_for_method(h_method);
set_interpreter_entry(entry);
...
}
+

Interpreter::entry_for_method(h_method) here reads from the following array:

+
AbstractInterpreter::_entry_table  
+ +

So where is _entry_table filled in?

+

It is filled in below, in TemplateInterpreterGenerator::generate_all:

+
void TemplateInterpreterGenerator::generate_all(){

#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
}

// all non-native method kinds
method_entry(zerolocals) // this is where AbstractInterpreter::_entry_table[Interpreter::zerolocals] = generate_method_entry(Interpreter::zerolocals) is set
}
+ +

The routine generated here includes setting up the method frame:

+
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
// determine code generation flags
bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

// ebx: Method*
// rbcp: sender sp
address entry_point = __ pc();

const Address constMethod(rbx, Method::const_offset());
const Address access_flags(rbx, Method::access_flags_offset());
const Address size_of_parameters(rdx,
ConstMethod::size_of_parameters_offset());
const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


// get parameter size (always needed)
__ movptr(rdx, constMethod);
__ load_unsigned_short(rcx, size_of_parameters);

// rbx: Method*
// rcx: size of parameters
// rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

__ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
__ subl(rdx, rcx); // rdx = no. of additional locals

// YYY
// __ incrementl(rdx);
// __ andl(rdx, -2);

// see if we've got enough room on the stack for locals plus overhead.
generate_stack_overflow_check();

// get return address
__ pop(rax);

// compute beginning of parameters
__ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

// rdx - # of additional locals
// allocate space for locals
// explicitly initialize locals
{
Label exit, loop;
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
__ push((int) NULL_WORD); // initialize local variables
__ decrementl(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
__ bind(exit);
}

// initialize fixed part of activation frame
generate_fixed_frame(false);

// make sure method is not native & not abstract
#ifdef ASSERT
__ movl(rax, access_flags);
{
Label L;
__ testl(rax, JVM_ACC_NATIVE);
__ jcc(Assembler::zero, L);
__ stop("tried to execute native method as non-native");
__ bind(L);
}
{
Label L;
__ testl(rax, JVM_ACC_ABSTRACT);
__ jcc(Assembler::zero, L);
__ stop("tried to execute abstract method in interpreter");
__ bind(L);
}
#endif

// Since at this point in the method invocation the exception
// handler would try to exit the monitor of synchronized methods
// which hasn't been entered yet, we set the thread local variable
// _do_not_unlock_if_synchronized to true. The remove_activation
// will check this flag.

const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
NOT_LP64(__ get_thread(thread));
const Address do_not_unlock_if_synchronized(thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
__ movbool(do_not_unlock_if_synchronized, true);

__ profile_parameters_type(rax, rcx, rdx);
// increment invocation count & check for overflow
Label invocation_counter_overflow;
if (inc_counter) {
generate_counter_incr(&invocation_counter_overflow);
}

Label continue_after_compile;
__ bind(continue_after_compile);

// check for synchronized interpreted methods
bang_stack_shadow_pages(false);

// reset the _do_not_unlock_if_synchronized flag
NOT_LP64(__ get_thread(thread));
__ movbool(do_not_unlock_if_synchronized, false);

// check for synchronized methods
// Must happen AFTER invocation_counter check and stack overflow check,
// so method is not locked if overflows.
if (synchronized) {
// Allocate monitor and lock method
lock_method();
} else {
// no synchronization necessary
#ifdef ASSERT
{
Label L;
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_SYNCHRONIZED);
__ jcc(Assembler::zero, L);
__ stop("method needs synchronization");
__ bind(L);
}
#endif
}

// start execution
#ifdef ASSERT
{
Label L;
const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
__ movptr(rax, monitor_block_top);
__ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
}
#endif

// jvmti support
__ notify_method_entry();

__ dispatch_next(vtos); //////// the method frame and context are now set up; dispatch and execute the next bytecode

// invocation counter overflow
if (inc_counter) {
// Handle overflow of counter and compile method
__ bind(invocation_counter_overflow);
generate_counter_overflow(continue_after_compile);
}

return entry_point;
}
+ + + + +

Related reading


diff --git a/2023/08/25/Hydration-completed-but-contains-mismatches/index.html b/2023/08/25/Hydration-completed-but-contains-mismatches/index.html
new file mode 100644
index 0000000000..0dedd83caf
--- /dev/null
+++ b/2023/08/25/Hydration-completed-but-contains-mismatches/index.html
@@ -0,0 +1,444 @@

+ Hydration completed but contains mismatches


Background

I host a VitePress static site behind Cloudflare. The page content appeared duplicated, and the Chrome devtools console showed the error: Hydration completed but contains mismatches.

Fix

In Cloudflare, under Auto Minify, uncheck the CSS/JS/HTML options.


diff --git a/2023/08/25/java-unbox/index.html b/2023/08/25/java-unbox/index.html
new file mode 100644
index 0000000000..6600774610
--- /dev/null
+++ b/2023/08/25/java-unbox/index.html
@@ -0,0 +1,454 @@

+ java unbox


Background

When comparing two boxed numbers: Long.equals compares the values, not the object identity (address).
public boolean equals(Object obj) {
if (obj instanceof Long) {
return this.value == (Long)obj;
} else {
return false;
}
}
+ +

Why?

+

Because == between a boxed value and a numeric value involves binary numeric promotion.

+

JLS documentation

The relevant sections:

+
// JLS text for the "==" operator

15.21.1 Numerical Equality Operators == and !=
If the operands of an equality operator are both of numeric type, or one is of
numeric type and the other is convertible (§5.1.8) to numeric type, binary numeric
promotion is performed on the operands (§5.6.2).
+ +
// the Binary Numeric Promotion section

5.6.2 Binary Numeric Promotion
When an operator applies binary numeric promotion to a pair of operands, each
of which must denote a value that is convertible to a numeric type, the following
rules apply, in order:
1. If any operand is of a reference type, it is subjected to unboxing conversion
+ +
// == triggers binary numeric promotion
Binary numeric promotion is performed on the operands of certain operators:
• The multiplicative operators *, /, and % (§15.17)
• The addition and subtraction operators for numeric types + and - (§15.18.2)
• The numerical comparison operators <, <=, >, and >= (§15.20.1)
• The numerical equality operators == and != (§15.21.1)
+ +

So:

+
@Test
public void testEq(){
Long i = new Long(1000L);
Long j = new Long(1000L);
Assert.assertFalse(i == j); // both operands are objects, so binary numeric promotion does not apply, no unboxing happens, and the references are compared

Assert.assertTrue(i == 1000L); // here binary numeric promotion applies, so the values are compared
}
+ +

diff --git a/2023/09/06/java-branch-bytecode/index.html b/2023/09/06/java-branch-bytecode/index.html
new file mode 100644
index 0000000000..3201b8f6a0
--- /dev/null
+++ b/2023/09/06/java-branch-bytecode/index.html
@@ -0,0 +1,450 @@

+ java branch bytecode


Background

Understand how the various branch (jump) bytecodes are handled.
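As context for the annotated disassembly below, here is a tiny method whose compiled bytecode contains the kind of conditional jump and backward branch being traced (the class is illustrative, not from the original post; javap -c shows the conditional compare plus a backward goto):

public class BranchDemo {
    public static int loop(int n) {
        int sum = 0;
        for (int i = 0; i < n; i++) {   // compiles to a conditional branch plus a backward goto
            sum += i;
        }
        return sum;
    }
}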

Annotated disassembly

(gdb) x/30i 0x7fffe10176ab

0x7fffe10176ab: mov -0x18(%rbp),%rcx ## __ get_method(rcx); // rcx holds method , rbp-0x18 就是方法地址
0x7fffe10176af: mov -0x28(%rbp),%rax ## __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx 一直到 0x7fffe10176d0都是profile_taken_branch
0x7fffe10176b3: test %rax,%rax
0x7fffe10176b6: je 0x7fffe10176d4
0x7fffe10176bc: mov 0x8(%rax),%rbx
0x7fffe10176c0: add $0x1,%rbx
0x7fffe10176c4: sbb $0x0,%rbx
0x7fffe10176c8: mov %rbx,0x8(%rax)
0x7fffe10176cc: add 0x10(%rax),%rax
0x7fffe10176d0: mov %rax,-0x28(%rbp)
0x7fffe10176d4: movswl 0x1(%r13),%edx ## __ movl(rdx, at_bcp(1)); r13指向当前要取的字节码指令的地址
0x7fffe10176d9: bswap %edx ##__ bswapl(rdx);
0x7fffe10176db: sar $0x10,%edx ## __ sarl(rdx, 16);
0x7fffe10176de: movslq %edx,%rdx ## LP64_ONLY(__ movl2ptr(rdx, rdx));
0x7fffe10176e1: add %rdx,%r13 ## __ addptr(rbcp, rdx); // Adjust the bcp in r13 by the displacement in rdx
0x7fffe10176e4: test %edx,%edx ## 判断是不是 0 ,是0 就ZF=1 ,SF
0x7fffe10176e6: jns 0x7fffe10176ec ## 大于等于0 , 跳转到0x7fffe10176ec
0x7fffe10176ec: mov 0x20(%rcx),%rax
0x7fffe10176ec: mov 0x20(%rcx),%rax ## __ movptr(rax, Address(rcx, Method::method_counters_offset()));
0x7fffe10176f0: test %rax,%rax ## __ testptr(rax, rax);
0x7fffe10176f3: jne 0x7fffe10176f9 ## ZF!=0 跳转, 也就是rax >0 跳转 __ jcc(Assembler::notZero, has_counters);
0x7fffe10176f9: push %rdx ## __ push(rdx);
0x7fffe10176fa: push %rcx ## __ push(rcx);
0x7fffe10176fb: call 0x7fffe1017705 ## begin , __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
rcx); 一直到0x7fffe1017828
0x7fffe1017700: jmp 0x7fffe1017829
0x7fffe1017705: mov %rcx,%rsi
0x7fffe1017708: lea 0x8(%rsp),%rax
0x7fffe101770d: mov %r13,-0x40(%rbp)
0x7fffe1017711: cmpq $0x0,-0x10(%rbp)
0x7fffe1017719: je 0x7fffe1017733
0x7fffe101771f: movabs $0x7ffff71becc8,%rdi
0x7fffe1017729: and $0xfffffffffffffff0,%rsp
0x7fffe101772d:
call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017732: hlt
0x7fffe1017733: push %r10
0x7fffe1017735:
cmp 0x16aef7c4(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
0x7fffe101773c: je 0x7fffe1017756
0x7fffe1017742: movabs $0x7ffff7311c28,%rdi
0x7fffe101774c: and $0xfffffffffffffff0,%rsp
0x7fffe1017750:
call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017755: hlt
0x7fffe1017756: pop %r10
0x7fffe1017758: mov %r15,%rdi
0x7fffe101775b: vzeroupper
0x7fffe101775e: mov %rbp,0x2d0(%r15)
0x7fffe1017765: mov %rax,0x2c0(%r15)
0x7fffe101776c: test $0xf,%esp
0x7fffe1017772: je 0x7fffe101778a
0x7fffe1017778: sub $0x8,%rsp
0x7fffe101777c:
call 0x7ffff65d4a46 <_ZN18InterpreterRuntime21build_method_countersEP10JavaThreadP6Method>
0x7fffe1017781: add $0x8,%rsp
0x7fffe1017785: jmp 0x7fffe101778f
0x7fffe101778a:
call 0x7ffff65d4a46 <_ZN18InterpreterRuntime21build_method_countersEP10JavaThreadP6Method>
0x7fffe101778f: push %rax
0x7fffe1017790: push %rdi
0x7fffe1017791: push %rsi
0x7fffe1017792: push %rdx
0x7fffe1017793: push %rcx
0x7fffe1017794: push %r8
0x7fffe1017796: push %r9
0x7fffe1017798: push %r10
0x7fffe101779a: push %r11
0x7fffe101779c: test $0xf,%esp
0x7fffe10177a2: je 0x7fffe10177ba
0x7fffe10177a8: sub $0x8,%rsp
0x7fffe10177ac: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe10177b1: add $0x8,%rsp
0x7fffe10177b5: jmp 0x7fffe10177bf
0x7fffe10177ba: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe10177bf: pop %r11
0x7fffe10177c1: pop %r10
0x7fffe10177c3: pop %r9
0x7fffe10177c5: pop %r8
0x7fffe10177c7: pop %rcx
0x7fffe10177c8: pop %rdx
0x7fffe10177c9: pop %rsi
0x7fffe10177ca: pop %rdi
0x7fffe10177cb: cmp %rax,%r15
0x7fffe10177ce: je 0x7fffe10177e8
0x7fffe10177d4: movabs $0x7ffff7311da0,%rdi
0x7fffe10177de: and $0xfffffffffffffff0,%rsp
0x7fffe10177e2:
call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe10177e7: hlt
0x7fffe10177e8: pop %rax
0x7fffe10177e9: movq $0x0,0x2c0(%r15)
0x7fffe10177f4: movq $0x0,0x2d0(%r15)
0x7fffe10177ff: movq $0x0,0x2c8(%r15)
0x7fffe101780a: vzeroupper
0x7fffe101780d: cmpq $0x0,0x8(%r15)
0x7fffe1017815: je 0x7fffe1017820
0x7fffe101781b: jmp 0x7fffe1000c20
0x7fffe1017820: mov -0x40(%rbp),%r13
0x7fffe1017824: mov -0x38(%rbp),%r14
0x7fffe1017828: ret ### end __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
rcx);
0x7fffe1017829: pop %rcx ### __ pop(rcx);
0x7fffe101782a: pop %rdx ## __ pop(rdx);
0x7fffe101782b: mov 0x20(%rcx),%rax ## __ movptr(rax, Address(rcx, Method::method_counters_offset()));
0x7fffe101782f: test %rax,%rax ## __ testptr(rax, rax);
0x7fffe1017832: je 0x7fffe1017838 ## __ jcc(Assembler::zero, dispatch);
0x7fffe1017838: mov 0x18(%rcx),%rbx ## __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); // Are we profiling?
0x7fffe101783c: test %rbx,%rbx ## __ testptr(rbx, rbx);
0x7fffe101783f: je 0x7fffe1017841 ## __ jccb(Assembler::zero, no_mdo);
0x7fffe1017841: mov 0x130(%rbx),%eax ## 一直到0x7fffe1017856 都是 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
0x7fffe1017847: add $0x2,%eax
0x7fffe101784a: mov %eax,0x130(%rbx)
0x7fffe1017850: and 0x144(%rbx),%eax
0x7fffe1017856: je 0x7fffe101785c
0x7fffe101785c: jmp 0x7fffe1017861 ## __ jmp(dispatch);
0x7fffe1017861: mov 0x20(%rcx),%rcx ## __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
0x7fffe1017865: mov 0x10(%rcx),%eax ### __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,rax, false, Assembler::zero, UseOnStackReplacement ? &backedge_counter_overflow : NULL);
0x7fffe1017868: add $0x2,%eax
0x7fffe101786b: mov %eax,0x10(%rcx)
0x7fffe101786e: and 0x2c(%rcx),%eax
0x7fffe1017871: je 0x7fffe1017877
0x7fffe1017877: movzbl 0x0(%r13),%ebx ### // Pre-load the next target bytecode into rbx __ load_unsigned_byte(rbx, Address(rbcp, 0));
0x7fffe101787c: testb $0x1,0x388(%r15) ### // continue with the bytecode @ target
### // rax: return bci for jsr's, unused otherwise
###// rbx: target bytecode
###// r13: target bcp
### // __ dispatch_only(vtos, true); 从0x7fffe101787c 到 0x7fffe101789c
0x7fffe1017884: je 0x7fffe1017892
0x7fffe1017886: movabs $0x7ffff7bd68a0,%r10
0x7fffe1017890: jmp 0x7fffe101789c
0x7fffe1017892: movabs $0x7ffff7bcc8a0,%r10
0x7fffe101789c: jmp *(%r10,%rbx,8)
0x7fffe10178a0: neg %rdx ## __ negptr(rdx);
0x7fffe10178a3: add %r13,%rdx ## __ addptr(rdx, rbcp); // branch bcp
0x7fffe10178a6: call 0x7fffe10178b0 ## 从 0x7fffe10178a6 到0x7fffe10179d3 都是call_VM
## __ call_VM(noreg,
## CAST_FROM_FN_PTR(address,
## InterpreterRuntime::frequency_counter_overflow),
## rdx);
0x7fffe10178ab: jmp 0x7fffe10179d4
0x7fffe10178b0: mov %rdx,%rsi
0x7fffe10178b3: lea 0x8(%rsp),%rax
0x7fffe10178b8: mov %r13,-0x40(%rbp)
0x7fffe10178bc: cmpq $0x0,-0x10(%rbp)
0x7fffe10178c4: je 0x7fffe10178de
0x7fffe10178ca: movabs $0x7ffff71becc8,%rdi
0x7fffe10178d4: and $0xfffffffffffffff0,%rsp
0x7fffe10178d8: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe10178dd: hlt
0x7fffe10178de: push %r10
0x7fffe10178e0: cmp 0x16aef619(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
0x7fffe10178e7: je 0x7fffe1017901
0x7fffe10178ed: movabs $0x7ffff7311c28,%rdi
0x7fffe10178f7: and $0xfffffffffffffff0,%rsp
0x7fffe10178fb: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017900: hlt
0x7fffe1017901: pop %r10
0x7fffe1017903: mov %r15,%rdi
0x7fffe1017906: vzeroupper
0x7fffe1017909: mov %rbp,0x2d0(%r15)
0x7fffe1017910: mov %rax,0x2c0(%r15)
0x7fffe1017917: test $0xf,%esp
0x7fffe101791d: je 0x7fffe1017935
0x7fffe1017923: sub $0x8,%rsp
0x7fffe1017927: call 0x7ffff65d3eb4 <_ZN18InterpreterRuntime26frequency_counter_overflowEP10JavaThreadPh>
0x7fffe101792c: add $0x8,%rsp
0x7fffe1017930: jmp 0x7fffe101793a
0x7fffe1017935: call 0x7ffff65d3eb4 <_ZN18InterpreterRuntime26frequency_counter_overflowEP10JavaThreadPh>
0x7fffe101793a: push %rax
0x7fffe101793b: push %rdi
0x7fffe101793c: push %rsi
0x7fffe101793d: push %rdx
0x7fffe101793e: push %rcx
0x7fffe101793f: push %r8
0x7fffe1017941: push %r9
0x7fffe1017943: push %r10
0x7fffe1017945: push %r11
0x7fffe1017947: test $0xf,%esp
0x7fffe101794d: je 0x7fffe1017965
0x7fffe1017953: sub $0x8,%rsp
0x7fffe1017957: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe101795c: add $0x8,%rsp
0x7fffe1017960: jmp 0x7fffe101796a
0x7fffe1017965: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe101796a: pop %r11
0x7fffe101796c: pop %r10
0x7fffe101796e: pop %r9
0x7fffe1017970: pop %r8
0x7fffe1017972: pop %rcx
0x7fffe1017973: pop %rdx
0x7fffe1017974: pop %rsi
0x7fffe1017975: pop %rdi
0x7fffe1017976: cmp %rax,%r15
0x7fffe1017979: je 0x7fffe1017993
0x7fffe101797f: movabs $0x7ffff7311da0,%rdi
0x7fffe1017989: and $0xfffffffffffffff0,%rsp
0x7fffe101798d: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017992: hlt
0x7fffe1017993: pop %rax
0x7fffe1017994: movq $0x0,0x2c0(%r15)
0x7fffe101799f: movq $0x0,0x2d0(%r15)
0x7fffe10179aa: movq $0x0,0x2c8(%r15)
0x7fffe10179b5: vzeroupper
0x7fffe10179b8: cmpq $0x0,0x8(%r15)
0x7fffe10179c0: je 0x7fffe10179cb
0x7fffe10179c6: jmp 0x7fffe1000c20
0x7fffe10179cb: mov -0x40(%rbp),%r13
0x7fffe10179cf: mov -0x38(%rbp),%r14
0x7fffe10179d3: ret
## // rax: osr nmethod (osr ok) or NULL (osr not possible)
## // rdx: scratch
## // r14: locals pointer
## // r13: bcp
0x7fffe10179d4: test %rax,%rax ## __ testptr(rax, rax); // test result
0x7fffe10179d7: je 0x7fffe1017877 ## __ jcc(Assembler::zero, dispatch); // no osr if null
0x7fffe10179dd: cmpb $0x0,0x14b(%rax) ## // nmethod may have been invalidated (VM may block upon call_VM return) __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
0x7fffe10179e4: jne 0x7fffe1017877 ## __ jcc(Assembler::notEqual, dispatch);
### // We have the address of an on stack replacement routine in rax.
### // In preparation of invoking it, first we must migrate the locals
### // and monitors from off the interpreter frame on the stack.
### // Ensure to save the osr nmethod over the migration call,
### // it will be preserved in rbx.
0x7fffe10179ea: mov %rax,%rbx ### __ mov(rbx, rax);

0x7fffe10179ed: call 0x7fffe10179f7 ### 从 0x7fffe10179ed 0x7fffe1017b17 都是 call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
0x7fffe10179f2: jmp 0x7fffe1017b18
0x7fffe10179f7: lea 0x8(%rsp),%rax
0x7fffe10179fc: mov %r13,-0x40(%rbp)
0x7fffe1017a00: cmpq $0x0,-0x10(%rbp)
0x7fffe1017a08: je 0x7fffe1017a22
0x7fffe1017a0e: movabs $0x7ffff71becc8,%rdi
0x7fffe1017a18: and $0xfffffffffffffff0,%rsp
0x7fffe1017a1c: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017a21: hlt
0x7fffe1017a22: push %r10
0x7fffe1017a24: cmp 0x16aef4d5(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
0x7fffe1017a2b: je 0x7fffe1017a45
0x7fffe1017a31: movabs $0x7ffff7311c28,%rdi
0x7fffe1017a3b: and $0xfffffffffffffff0,%rsp
0x7fffe1017a3f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017a44: hlt
0x7fffe1017a45: pop %r10
0x7fffe1017a47: mov %r15,%rdi
0x7fffe1017a4a: vzeroupper
0x7fffe1017a4d: mov %rbp,0x2d0(%r15)
0x7fffe1017a54: mov %rax,0x2c0(%r15)
0x7fffe1017a5b: test $0xf,%esp
0x7fffe1017a61: je 0x7fffe1017a79
0x7fffe1017a67: sub $0x8,%rsp
0x7fffe1017a6b: call 0x7ffff6bcdb22 <_ZN13SharedRuntime19OSR_migration_beginEP10JavaThread>
0x7fffe1017a70: add $0x8,%rsp
0x7fffe1017a74: jmp 0x7fffe1017a7e
0x7fffe1017a79: call 0x7ffff6bcdb22 <_ZN13SharedRuntime19OSR_migration_beginEP10JavaThread>
0x7fffe1017a7e: push %rax
0x7fffe1017a7f: push %rdi
0x7fffe1017a80: push %rsi
0x7fffe1017a81: push %rdx
0x7fffe1017a82: push %rcx
0x7fffe1017a83: push %r8
0x7fffe1017a85: push %r9
0x7fffe1017a87: push %r10
0x7fffe1017a89: push %r11
0x7fffe1017a8b: test $0xf,%esp
0x7fffe1017a91: je 0x7fffe1017aa9
0x7fffe1017a97: sub $0x8,%rsp
0x7fffe1017a9b: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe1017aa0: add $0x8,%rsp
0x7fffe1017aa4: jmp 0x7fffe1017aae
0x7fffe1017aa9: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
0x7fffe1017aae: pop %r11
0x7fffe1017ab0: pop %r10
0x7fffe1017ab2: pop %r9
0x7fffe1017ab4: pop %r8
0x7fffe1017ab6: pop %rcx
0x7fffe1017ab7: pop %rdx
0x7fffe1017ab8: pop %rsi
0x7fffe1017ab9: pop %rdi
0x7fffe1017aba: cmp %rax,%r15
0x7fffe1017abd: je 0x7fffe1017ad7
0x7fffe1017ac3: movabs $0x7ffff7311da0,%rdi
0x7fffe1017acd: and $0xfffffffffffffff0,%rsp
0x7fffe1017ad1: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
0x7fffe1017ad6: hlt
0x7fffe1017ad7: pop %rax
0x7fffe1017ad8: movq $0x0,0x2c0(%r15)
0x7fffe1017ae3: movq $0x0,0x2d0(%r15)
0x7fffe1017aee: movq $0x0,0x2c8(%r15)
0x7fffe1017af9: vzeroupper
0x7fffe1017afc: cmpq $0x0,0x8(%r15)
0x7fffe1017b04: je 0x7fffe1017b0f
0x7fffe1017b0a: jmp 0x7fffe1000c20
0x7fffe1017b0f: mov -0x40(%rbp),%r13
0x7fffe1017b13: mov -0x38(%rbp),%r14
0x7fffe1017b17: ret
0x7fffe1017b18: mov %rax,%rsi ## LP64_ONLY(__ mov(j_rarg0, rax));
0x7fffe1017b1b: mov -0x8(%rbp),%rdx ## __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
0x7fffe1017b1f: leave ## __ leave(); // remove frame anchor
0x7fffe1017b20: pop %rcx ## __ pop(retaddr); // get return address
0x7fffe1017b21: mov %rdx,%rsp ## __ mov(rsp, sender_sp); // set sp to sender sp
0x7fffe1017b24: and $0xfffffffffffffff0,%rsp ## // Ensure compiled code always sees stack at proper alignment __ andptr(rsp, -(StackAlignmentInBytes));
0x7fffe1017b28: push %rcx ## // push the return address __ push(retaddr);
0x7fffe1017b29: jmp *0xf8(%rbx) ## __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
+ +

Full call stack

(gdb) bt
#0 TemplateTable::branch (is_jsr=false, is_wide=false)
at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2188
#1 0x00007ffff6d74ce0 in TemplateTable::if_0cmp (cc=TemplateTable::equal)
at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2302
#2 0x00007ffff6d66161 in Template::generate (
this=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>,
masm=0x7ffff0029588)
at /home/dai/jdk/src/hotspot/share/interpreter/templateTable.cpp:62
#3 0x00007ffff6d59a3c in TemplateInterpreterGenerator::generate_and_dispatch (
this=0x7ffff59fea10,
t=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>, tos_out=vtos)
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:380
#4 0x00007ffff6d59572 in TemplateInterpreterGenerator::set_short_entry_points
(this=0x7ffff59fea10,
t=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>,
bep=@0x7ffff59fe398: 0x7fffe1008f14 "H\277h\260N\367\377\177",
cep=@0x7ffff59fe3a0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
sep=@0x7ffff59fe3a8: 0x7fffe1008f14 "H\277h\260N\367\377\177",
aep=@0x7ffff59fe3b0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
iep=@0x7ffff59fe3b8: 0x7fffe1017627 "PSQRH\213M\330H\205\311\017\204g",
lep=@0x7ffff59fe3c0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
fep=@0x7ffff59fe3c8: 0x7fffe1008f14 "H\277h\260N\367\377\177",
--Type <RET> for more, q to quit, c to continue without paging--
dep=@0x7ffff59fe3d0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
vep=@0x7ffff59fe3d8: 0x7fffe1017620 "\213\004$H\203\304\bPSQRH\213M\330H\205\311\017\204g")
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:344
#5 0x00007ffff6d590ec in TemplateInterpreterGenerator::set_entry_points (
this=0x7ffff59fea10, code=Bytecodes::_ifeq)
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:313
#6 0x00007ffff6d58d4a in TemplateInterpreterGenerator::set_entry_points_for_all_bytes (this=0x7ffff59fea10)
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:269
#7 0x00007ffff6d5893a in TemplateInterpreterGenerator::generate_all (
this=0x7ffff59fea10)
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:227
#8 0x00007ffff6d57259 in TemplateInterpreterGenerator::TemplateInterpreterGenerator (this=0x7ffff59fea10, _code=0x7ffff00febe0)
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:40
#9 0x00007ffff6d55de4 in TemplateInterpreter::initialize_code ()
at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreter.cpp:62
--Type <RET> for more, q to quit, c to continue without paging--
#10 0x00007ffff65cc48d in interpreter_init_code ()
at /home/dai/jdk/src/hotspot/share/interpreter/interpreter.cpp:137
#11 0x00007ffff65a6d94 in init_globals ()
at /home/dai/jdk/src/hotspot/share/runtime/init.cpp:134
#12 0x00007ffff6d8d1ca in Threads::create_vm (args=0x7ffff59fed50,
canTryAgain=0x7ffff59fec5b)
at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2843
#13 0x00007ffff66b243b in JNI_CreateJavaVM_inner (vm=0x7ffff59feda8,
penv=0x7ffff59fedb0, args=0x7ffff59fed50)
at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3613
#14 0x00007ffff66b2787 in JNI_CreateJavaVM (vm=0x7ffff59feda8,
penv=0x7ffff59fedb0, args=0x7ffff59fed50)
at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3701
#15 0x00007ffff7faca6a in InitializeJVM (pvm=0x7ffff59feda8,
penv=0x7ffff59fedb0, ifn=0x7ffff59fee00)
at /home/dai/jdk/src/java.base/share/native/libjli/java.c:1459
#16 0x00007ffff7fa95ec in JavaMain (_args=0x7fffffffa9a0)
at /home/dai/jdk/src/java.base/share/native/libjli/java.c:411
#17 0x00007ffff7fb05ec in ThreadJavaMain (args=0x7fffffffa9a0)
at /home/dai/jdk/src/java.base/unix/native/libjli/java_md.c:651
#18 0x00007ffff7c94b43 in start_thread (arg=<optimized out>)
at ./nptl/pthread_create.c:442
#19 0x00007ffff7d26a00 in clone3 ()
--Type <RET> for more, q to quit, c to continue without paging--
at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
(gdb) info breakpoints
Num Type Disp Enb Address What
1 breakpoint keep y 0x00007ffff6d73852 in TemplateTable::branch(bool, bool) at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2122
breakpoint already hit 1 time
(gdb) p _masm->_code_section->_end

+ + + +

Related reading


diff --git "a/2023/09/06/java-jdk-\344\270\272\344\273\200\344\271\210\344\270\200\344\270\252\347\272\277\347\250\213\347\251\272\346\214\207\351\222\210\344\270\215\351\200\200\345\207\272/index.html" "b/2023/09/06/java-jdk-\344\270\272\344\273\200\344\271\210\344\270\200\344\270\252\347\272\277\347\250\213\347\251\272\346\214\207\351\222\210\344\270\215\351\200\200\345\207\272/index.html"
new file mode 100644
index 0000000000..615d511d98
--- /dev/null
+++ "b/2023/09/06/java-jdk-\344\270\272\344\273\200\344\271\210\344\270\200\344\270\252\347\272\277\347\250\213\347\251\272\346\214\207\351\222\210\344\270\215\351\200\200\345\207\272/index.html"
@@ -0,0 +1,449 @@

+ java jdk: why a null pointer fault in one thread does not exit the JVM


Background

Understand how the JDK catches such faults (its signal-based exception handling), i.e. why a null pointer fault in one thread does not bring down the whole JVM.
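A minimal illustration of the behaviour in question (the class name is made up): the NullPointerException kills only the thread that raised it, and the VM keeps running.

public class NpeThreadDemo {
    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(() -> {
            String s = null;
            System.out.println(s.length());   // throws NullPointerException in this thread only
        });
        t.start();
        t.join();
        System.out.println("main thread is still alive");
    }
}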

Stack trace

(gdb) bt
#0 PosixSignals::pd_hotspot_signal_handler (sig=sig@entry=11, info=info@entry=0x7ffff7bfd330, uc=uc@entry=0x7ffff7bfd200, thread=0x7ffff00295a0) at /home/ubuntu/jdk/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp:201
#1 0x00007ffff7090f7d in JVM_handle_linux_signal (abort_if_unrecognized=1, ucVoid=0x7ffff7bfd200, info=0x7ffff7bfd330, sig=11) at /home/ubuntu/jdk/src/hotspot/os/posix/signals_posix.cpp:656
#2 JVM_handle_linux_signal (sig=11, info=0x7ffff7bfd330, ucVoid=0x7ffff7bfd200, abort_if_unrecognized=1) at /home/ubuntu/jdk/src/hotspot/os/posix/signals_posix.cpp:557
#3 <signal handler called>
#4 0x00007fffe8537640 in ?? ()
#5 0x0000000000000246 in ?? ()
#6 0x00007fffe8537734 in ?? ()
#7 0x00007ffff79f1858 in ?? () from /home/ubuntu/jdk/build/linux-x86_64-server-fastdebug/jdk/lib/server/libjvm.so
#8 0x00007ffff7bfe290 in ?? ()
#9 0x00007ffff734777a in VM_Version::get_processor_features () at /home/ubuntu/jdk/src/hotspot/cpu/x86/vm_version_x86.cpp:803
+ +

The handler returns true here, and the JVM therefore skips the exit path:

+
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
ucontext_t* uc, JavaThread* thread) {

/*
NOTE: does not seem to work on linux.
if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
// can't decode this kind of signal
info = NULL;
} else {
assert(sig == info->si_signo, "bad siginfo");
}
*/
// decide if this trap can be handled by a stub
address stub = NULL;

address pc = NULL;

//%note os_trap_1
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Posix::ucontext_get_pc(uc);

if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
// An irrecoverable SI_KERNEL SIGSEGV has occurred.
// It's likely caused by dereferencing an address larger than TASK_SIZE.
return false;
}

// Handle ALL stack overflow variations here
if (sig == SIGSEGV) {
address addr = (address) info->si_addr;

// check if fault address is within thread stack
if (thread->is_in_full_stack(addr)) {
// stack overflow
if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
return true; // continue
}
}
}

if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
// Verify that OS save/restore AVX registers.
stub = VM_Version::cpuinfo_cont_addr();
}

if (thread->thread_state() == _thread_in_Java) {
// Java thread running in Java code => find exception handler if any
// a fault inside compiled code, the interpreter, or a stub

if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
stub = SharedRuntime::get_poll_stub(pc);
} else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
// BugId 4454115: A read from a MappedByteBuffer can fault
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob(pc);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
address next_pc = Assembler::locate_next_instruction(pc);
if (is_unsafe_arraycopy) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}
}
else

#ifdef AMD64
if (sig == SIGFPE &&
(info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
stub =
SharedRuntime::
continuation_for_implicit_exception(thread,
pc,
SharedRuntime::
IMPLICIT_DIVIDE_BY_ZERO);
#else
if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
// HACK: si_code does not work on linux 2.2.12-20!!!
int op = pc[0];
if (op == 0xDB) {
// FIST
// TODO: The encoding of D2I in x86_32.ad can cause an exception
// prior to the fist instruction if there was an invalid operation
// pending. We want to dismiss that exception. From the win_32
// side it also seems that if it really was the fist causing
// the exception that we do the d2i by hand with different
// rounding. Seems kind of weird.
// NOTE: that we take the exception at the NEXT floating point instruction.
assert(pc[0] == 0xDB, "not a FIST opcode");
assert(pc[1] == 0x14, "not a FIST opcode");
assert(pc[2] == 0x24, "not a FIST opcode");
return true;
} else if (op == 0xF7) {
// IDIV
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
} else {
// TODO: handle more cases if we are using other x86 instructions
// that can generate SIGFPE signal on linux.
tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
fatal("please update this code.");
}
#endif // AMD64
} else if (sig == SIGSEGV &&
MacroAssembler::uses_implicit_null_check(info->si_addr)) {
// Determination of interpreter/vtable stub/compiled code null exception
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
} else if ((thread->thread_state() == _thread_in_vm ||
thread->thread_state() == _thread_in_native) &&
(sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
thread->doing_unsafe_access())) {
address next_pc = Assembler::locate_next_instruction(pc);
if (UnsafeCopyMemory::contains_pc(pc)) {
next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
}
stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
}

// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
// and the heap gets shrunk before the field access.
if ((sig == SIGSEGV) || (sig == SIGBUS)) {
address addr = JNI_FastGetField::find_slowcase_pc(pc);
if (addr != (address)-1) {
stub = addr;
}
}
}

#ifndef AMD64
// Execution protection violation
//
// This should be kept as the last step in the triage. We don't
// have a dedicated trap number for a no-execute fault, so be
// conservative and allow other handlers the first shot.
//
// Note: We don't test that info->si_code == SEGV_ACCERR here.
// this si_code is so generic that it is almost meaningless; and
// the si_code for this condition may change in the future.
// Furthermore, a false-positive should be harmless.
if (UnguardOnExecutionViolation > 0 &&
stub == NULL &&
(sig == SIGSEGV || sig == SIGBUS) &&
uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
int page_size = os::vm_page_size();
address addr = (address) info->si_addr;
address pc = os::Posix::ucontext_get_pc(uc);
// Make sure the pc and the faulting address are sane.
//
// If an instruction spans a page boundary, and the page containing
// the beginning of the instruction is executable but the following
// page is not, the pc and the faulting address might be slightly
// different - we still want to unguard the 2nd page in this case.
//
// 15 bytes seems to be a (very) safe value for max instruction size.
bool pc_is_near_addr =
(pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
bool instr_spans_page_boundary =
(align_down((intptr_t) pc ^ (intptr_t) addr,
(intptr_t) page_size) > 0);

if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
static volatile address last_addr =
(address) os::non_memory_address_word();

// In conservative mode, don't unguard unless the address is in the VM
if (addr != last_addr &&
(UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

// Set memory to RWX and retry
address page_start = align_down(addr, page_size);
bool res = os::protect_memory((char*) page_start, page_size,
os::MEM_PROT_RWX);

log_debug(os)("Execution protection violation "
"at " INTPTR_FORMAT
", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
p2i(page_start), (res ? "success" : "failed"), errno);
stub = pc;

// Set last_addr so if we fault again at the same address, we don't end
// up in an endless loop.
//
// There are two potential complications here. Two threads trapping at
// the same address at the same time could cause one of the threads to
// think it already unguarded, and abort the VM. Likely very rare.
//
// The other race involves two threads alternately trapping at
// different addresses and failing to unguard the page, resulting in
// an endless loop. This condition is probably even more unlikely than
// the first.
//
// Although both cases could be avoided by using locks or thread local
// last_addr, these solutions are unnecessary complication: this
// handler is a best-effort safety net, not a complete solution. It is
// disabled by default and should only be used as a workaround in case
// we missed any no-execute-unsafe VM code.

last_addr = addr;
}
}
}
#endif // !AMD64

if (stub != NULL) {
// save all thread context in case we need to restore it
if (thread != NULL) thread->set_saved_exception_pc(pc);

os::Posix::ucontext_set_pc(uc, stub);
return true; ///////////////////////////////////////// this is where it returns true
}

return false;
}
+ +
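To see the end result of this handler from the Java side, here is a minimal sketch (my own toy example, not JDK code): the fault raised by the null dereference is converted into a NullPointerException that only terminates the offending thread, while the VM keeps running.
public class NpeInOneThread {
    public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(() -> {
            String s = null;
            // In compiled code HotSpot relies on the implicit-null-check / SIGSEGV trick
            // handled above; in the interpreter the check is explicit. Either way the
            // result is a NullPointerException thrown in this thread only.
            System.out.println(s.length());
        });
        t.start();
        t.join();
        // The uncaught NPE terminates only thread t; the process does not exit.
        System.out.println("main thread is still alive");
    }
}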

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/09/10/elastic-search-\347\274\226\350\257\221\345\222\214\350\260\203\350\257\225/index.html" "b/2023/09/10/elastic-search-\347\274\226\350\257\221\345\222\214\350\260\203\350\257\225/index.html" new file mode 100644 index 0000000000..654ab4ede7 --- /dev/null +++ "b/2023/09/10/elastic-search-\347\274\226\350\257\221\345\222\214\350\260\203\350\257\225/index.html" @@ -0,0 +1,498 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + elastic search 编译和调试 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ elastic search: building and debugging +

+ + +
+ + + + +
+ + +

Background

Get familiar with Elasticsearch internals.

+

Build

1
./gradlew localDistro
+ +
    +
  • Step 1: turn off the security checks — my local setup is plain HTTP, so HTTPS is not needed
    1
    2
    3
    4
### Set this option in config/elasticsearch.yml to false to disable the HTTPS/security checks
    # Enable security features
    xpack.security.enabled: false

  • +
  • Modify the code
    elasticsearch/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java
and add the jdwp parameters:
    1
    2
    3
    // also pass through distribution type
    jvmOptions.add("-Des.distribution.type=" + processInfo.sysprops().get("es.distribution.type"));
    jvmOptions.add("-agentlib:jdwp=transport=dt_socket,server=y,address=9999"); // 添加这一行 , 让jdb可以调试
  • +
  • Run elasticsearch; it will now pause during startup, waiting for jdb to attach
  • +
+
1
./elasticsearch
+ + +
    +
  • Debug with jdb
  • +
+

After pulling the dependencies with gradle, you also need the Lucene sources, extracted into the /home/ubuntu/lucene directory

+
1
jar -xf /home/ubuntu/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-core/9.7.0/35359f1763c9d7a0f04188c4933311be3c07b60e/lucene-core-9.7.0-sources.jar
+ +
1
2
## Here /home/ubuntu/ is where I keep the elasticsearch checkout, and `/home/ubuntu/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-core/9.7.0/35359f1763c9d7a0f04188c4933311be3c07b60e/` is gradle's default download cache path — use find to locate it on your machine
jdb -attach 9999 -sourcepath /home/ubuntu/elasticsearch/distribution/tools/cli-launcher/src/main/java/:/home/ubuntu/elasticsearch/server/src/main/java/:/home/ubuntu/lucene/
+ +
    +
  • Set a breakpoint
    1
    stop in org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest
  • +
+

Stack traces

Debugging with jdb:

+

The breakpoint is: stop in org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
Breakpoint hit: 
Breakpoint hit: "thread=elasticsearch[myhost][transport_worker][T#5]", org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest(), line=100 bci=0
100 if (request.hasParam("min_compatible_shard_node")) {

elasticsearch[myhost][transport_worker][T#5][1] where
[1] org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest (RestSearchAction.java:100)
[2] org.elasticsearch.rest.BaseRestHandler.handleRequest (BaseRestHandler.java:80)
[3] org.elasticsearch.xpack.security.rest.SecurityRestFilter.doHandleRequest (SecurityRestFilter.java:96)
[4] org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest (SecurityRestFilter.java:76)
[5] org.elasticsearch.rest.RestController.dispatchRequest (RestController.java:414)
[6] org.elasticsearch.rest.RestController.tryAllHandlers (RestController.java:543)
[7] org.elasticsearch.rest.RestController.dispatchRequest (RestController.java:316)
[8] org.elasticsearch.http.AbstractHttpServerTransport.dispatchRequest (AbstractHttpServerTransport.java:453)
[9] org.elasticsearch.http.AbstractHttpServerTransport.handleIncomingRequest (AbstractHttpServerTransport.java:549)
[10] org.elasticsearch.http.AbstractHttpServerTransport.incomingRequest (AbstractHttpServerTransport.java:426)
[11] org.elasticsearch.http.netty4.Netty4HttpPipeliningHandler.handlePipelinedRequest (Netty4HttpPipeliningHandler.java:128)
[12] org.elasticsearch.http.netty4.Netty4HttpPipeliningHandler.channelRead (Netty4HttpPipeliningHandler.java:118)
[13] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:442)
[14] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[15] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[16] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
[17] io.netty.handler.codec.MessageToMessageCodec.channelRead (MessageToMessageCodec.java:111)
[18] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:442)
[19] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[20] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[21] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
[22] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
[23] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[24] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[25] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
[26] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
[27] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[28] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[29] io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead (ByteToMessageDecoder.java:346)
[30] io.netty.handler.codec.ByteToMessageDecoder.channelRead (ByteToMessageDecoder.java:318)
[31] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
[32] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[33] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[34] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
[35] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
[36] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[37] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
[38] io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead (DefaultChannelPipeline.java:1,410)
[39] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:440)
[40] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
[41] io.netty.channel.DefaultChannelPipeline.fireChannelRead (DefaultChannelPipeline.java:919)
[42] io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read (AbstractNioByteChannel.java:166)
[43] io.netty.channel.nio.NioEventLoop.processSelectedKey (NioEventLoop.java:788)
[44] io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain (NioEventLoop.java:689)
[45] io.netty.channel.nio.NioEventLoop.processSelectedKeys (NioEventLoop.java:652)
[46] io.netty.channel.nio.NioEventLoop.run (NioEventLoop.java:562)
[47] io.netty.util.concurrent.SingleThreadEventExecutor$4.run (SingleThreadEventExecutor.java:997)
[48] io.netty.util.internal.ThreadExecutorMap$2.run (ThreadExecutorMap.java:74)
[49] java.lang.Thread.runWith (Thread.java:1,636)
[50] java.lang.Thread.run (Thread.java:1,623)
+

query phase

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
elasticsearch[myhost][search][T#7][1] where
[1] org.elasticsearch.search.query.QueryPhase.execute (QueryPhase.java:62)
[2] org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase (SearchService.java:516)
[3] org.elasticsearch.search.SearchService.executeQueryPhase (SearchService.java:668)
[4] org.elasticsearch.search.SearchService.lambda$executeQueryPhase$2 (SearchService.java:541)
[5] org.elasticsearch.search.SearchService$$Lambda$7604/0x00007fceb1297320.get (null)
[6] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:51)
[7] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:48)
[8] org.elasticsearch.action.ActionRunnable$3.doRun (ActionRunnable.java:73)
[9] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
[10] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
[11] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
[12] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
[13] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
[14] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
[15] java.lang.Thread.runWith (Thread.java:1,636)
[16] java.lang.Thread.run (Thread.java:1,623)
elasticsearch[myhost][search][T#7][1]
+ +

Inspecting the reader

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
Breakpoint hit: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=150 bci=0
150 final ContextIndexSearcher searcher = searchContext.searcher();

elasticsearch[myhost][search][T#7][1] next
>
Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=151 bci=5
151 final IndexReader reader = searcher.getIndexReader();

elasticsearch[myhost][search][T#7][1] next
>
Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=152 bci=10
152 QuerySearchResult queryResult = searchContext.queryResult();

elasticsearch[myhost][search][T#7][1] print reader
reader = "ExitableDirectoryReader(FilterLeafReader(FieldUsageTrackingLeafReader(reader=FilterLeafReader(_0(9.7.0):c1:[diagnostics={timestamp=1694357055349, source=flush, lucene.version=9.7.0, os.version=6.2.0-31-generic, os.arch=amd64, os=Linux, java.vendor=Oracle Corporation, java.runtime.version=20.0.2+9-78}]:[attributes={Lucene90StoredFieldsFormat.mode=BEST_SPEED}] :id=bv125vla2ovjxnipt5j9ssmby))))"
elasticsearch[myhost][search][T#7][1]
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
elasticsearch[myhost][search][T#7][1] print query
query = "age:38"
elasticsearch[myhost][search][T#7][1] step in
>
Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.internal.ContextIndexSearcher.search(), line=340 bci=0
340 final C firstCollector = collectorManager.newCollector();

elasticsearch[myhost][search][T#7][1] where
[1] org.elasticsearch.search.internal.ContextIndexSearcher.search (ContextIndexSearcher.java:340)
[2] org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch (QueryPhase.java:206)
[3] org.elasticsearch.search.query.QueryPhase.executeQuery (QueryPhase.java:134)
[4] org.elasticsearch.search.query.QueryPhase.execute (QueryPhase.java:63)
[5] org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase (SearchService.java:516)
[6] org.elasticsearch.search.SearchService.executeQueryPhase (SearchService.java:668)
[7] org.elasticsearch.search.SearchService.lambda$executeQueryPhase$2 (SearchService.java:541)
[8] org.elasticsearch.search.SearchService$$Lambda$7604/0x00007fceb1297320.get (null)
[9] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:51)
[10] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:48)
[11] org.elasticsearch.action.ActionRunnable$3.doRun (ActionRunnable.java:73)
[12] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
[13] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
[14] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
[15] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
[16] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
[17] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
[18] java.lang.Thread.runWith (Thread.java:1,636)
[19] java.lang.Thread.run (Thread.java:1,623)

+ + +

bulkScorer and scorer

The core comes down to these two functions:

+
    +
  • bulkScorer
  • +
  • scorer
  • +
+
1
2
3
4
5
6
7
8
9
10
BulkScorer bulkScorer = weight.bulkScorer(ctx);
if (bulkScorer != null) {
if (cancellable.isEnabled()) {
bulkScorer = new CancellableBulkScorer(bulkScorer, cancellable::checkCancelled);
}
try {
bulkScorer.score(leafCollector, liveDocs);
} catch (CollectionTerminatedException e) {
...
}
+ +

The weight here:

+
1
2
3
4
5
6
7
8
9
10
elasticsearch[ubuntu-Vostro-3690][search_worker][T#2][1] dump weight
weight = {
similarity: instance of org.elasticsearch.index.similarity.SimilarityService$PerFieldSimilarity(id=25223)
simScorer: instance of org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer(id=25224)
termStates: instance of org.apache.lucene.index.TermStates(id=25225)
scoreMode: instance of org.apache.lucene.search.ScoreMode(id=25226)
$assertionsDisabled: true
this$0: instance of org.apache.lucene.search.TermQuery(id=25227)
org.apache.lucene.search.Weight.parentQuery: instance of org.apache.lucene.search.TermQuery(id=25227)
}
+

The search process

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
elasticsearch[myhost][search_worker][T#5][1] list
246 // float. And then monotonicity is preserved through composition via
247 // x -> 1 + x and x -> 1 - 1/x.
248 // Finally we expand weight * (1 - 1 / (1 + freq * 1/norm)) to
249 // weight - weight / (1 + freq * 1/norm), which runs slightly faster.
250 => float normInverse = cache[((byte) encodedNorm) & 0xFF];
251 return weight - weight / (1f + freq * normInverse);
252 }
253
254 @Override
255 public Explanation explain(Explanation freq, long encodedNorm) {
elasticsearch[myhost][search_worker][T#5][1] where
[1] org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer.score (BM25Similarity.java:250)
[2] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
[3] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
[4] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
[5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreRange (Weight.java:274)
[6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:254)
[7] org.elasticsearch.search.internal.CancellableBulkScorer.score (CancellableBulkScorer.java:45)
[8] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
[9] org.elasticsearch.search.internal.ContextIndexSearcher.searchLeaf (ContextIndexSearcher.java:538)
[10] org.elasticsearch.search.internal.ContextIndexSearcher.search (ContextIndexSearcher.java:480)
[11] org.elasticsearch.search.internal.ContextIndexSearcher.lambda$search$4 (ContextIndexSearcher.java:396)
[12] org.elasticsearch.search.internal.ContextIndexSearcher$$Lambda$7626/0x00007fceb12a4e58.call (null)
[13] java.util.concurrent.FutureTask.run (FutureTask.java:317)
[14] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
[15] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
[16] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
[17] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
[18] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
[19] java.lang.Thread.runWith (Thread.java:1,636)
[20] java.lang.Thread.run (Thread.java:1,623)
+ +
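If you want to step through the same scoring path without a full Elasticsearch node, a minimal Lucene-only sketch works too (class names are from the Lucene 9.x line; the document content mirrors the test data used above and is otherwise made up):
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.store.ByteBuffersDirectory;

public class TermQueryDemo {
    public static void main(String[] args) throws Exception {
        ByteBuffersDirectory dir = new ByteBuffersDirectory();
        // index one document equivalent to {"name":"John Smith","age":"38"}
        try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new TextField("name", "John Smith", Field.Store.YES));
            doc.add(new TextField("age", "38", Field.Store.YES));
            writer.addDocument(doc);
        }
        try (DirectoryReader reader = DirectoryReader.open(dir)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            searcher.setSimilarity(new BM25Similarity());               // the scorer seen in the jdb dump above
            TopDocs hits = searcher.search(new TermQuery(new Term("age", "38")), 10);
            System.out.println(hits.totalHits);                         // 1 hit
        }
    }
}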

Common ES CRUD commands

    +
  • Index a document

    +
    1
    curl -XPOST http://localhost:9200/test/_doc -H "Content-Type: application/json" -d  '{"name":"John Smith","age":"38"}'
    +
  • +
  • Query

    +
    1
    curl -X GET  -H "Content-Type: application/json"    http://localhost:9200/test/_search -d '{"query":{"match":{"age":"38"}}}'
  • +
+

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/09/19/nginx-temp-proxy-\346\235\203\351\231\220\345\257\274\350\207\264\346\212\245\351\224\231/index.html" "b/2023/09/19/nginx-temp-proxy-\346\235\203\351\231\220\345\257\274\350\207\264\346\212\245\351\224\231/index.html" new file mode 100644 index 0000000000..f36a9a8d07 --- /dev/null +++ "b/2023/09/19/nginx-temp-proxy-\346\235\203\351\231\220\345\257\274\350\207\264\346\212\245\351\224\231/index.html" @@ -0,0 +1,467 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + nginx temp_proxy 权限导致报错 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ nginx proxy_temp permissions causing errors +

+ + +
+ + + + +
+ + +

Background

There is a production service whose request path used to be
PHP -> NGINX -> PHP
It had to be migrated to
PHP -> NGINX -> JAVA, i.e. the upstream the request hits was switched from PHP to Java.

+

Investigation

Reproducing the error

After the release, PHP calls to the Java endpoint failed with cURL error 18: transfer closed with outstanding read data remaining; digging in showed the Java side saw a socket reset (RST)

+

Investigating the Java error

The Java logs in production showed RST errors. The requests took less than a second yet were still reset, which rules out a slow endpoint hitting a timeout

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
org.apache.catalina.connector.ClientAbortException: java.io.IOException: Connection reset by peer

at org.apache.catalina.connector.OutputBuffer.realWriteBytes(OutputBuffer.java:353)

at org.apache.catalina.connector.OutputBuffer.flushByteBuffer(OutputBuffer.java:783)

at org.apache.catalina.connector.OutputBuffer.append(OutputBuffer.java:688)

at org.apache.catalina.connector.OutputBuffer.writeBytes(OutputBuffer.java:388)

at org.apache.catalina.connector.OutputBuffer.write(OutputBuffer.java:366)

at org.apache.catalina.connector.CoyoteOutputStream.write(CoyoteOutputStream.java:96)

at org.springframework.util.StreamUtils$NonClosingOutputStream.write(StreamUtils.java:287)

at com.fasterxml.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:2161)

at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegment2(UTF8JsonGenerator.java:1476)

at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegment(UTF8JsonGenerator.java:1423)

at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegments(UTF8JsonGenerator.java:1306)

at com.fasterxml.jackson.core.json.UTF8JsonGenerator.writeString(UTF8JsonGenerator.java:502)

at com.fasterxml.jackson.databind.ser.std.StringSerializer.serialize(StringSerializer.java:41)

at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

at com.fasterxml.jackson.databind.ser.std.MapSerializer.serializeFields(MapSerializer.java:726)

at com.fasterxml.jackson.databind.ser.std.MapSerializer.serializeWithoutTypeInfo(MapSerializer.java:681)

at com.fasterxml.jackson.databind.ser.std.MapSerializer.serialize(MapSerializer.java:637)

at com.fasterxml.jackson.databind.ser.std.MapSerializer.serialize(MapSerializer.java:33)

at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serializeContents(IndexedListSerializer.java:119)

at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serialize(IndexedListSerializer.java:79)

at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serialize(IndexedListSerializer.java:18)

at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

at com.fasterxml.jackson.databind.ser.DefaultSerializerProvider._serialize(DefaultSerializerProvider.java:480)

at com.fasterxml.jackson.databind.ser.DefaultSerializerProvider.serializeValue(DefaultSerializerProvider.java:319)

at com.fasterxml.jackson.databind.ObjectWriter$Prefetch.serialize(ObjectWriter.java:1516)

at com.fasterxml.jackson.databind.ObjectWriter.writeValue(ObjectWriter.java:1006)

at org.springframework.http.converter.json.AbstractJackson2HttpMessageConverter.writeInternal(AbstractJackson2HttpMessageConverter.java:346)

at org.springframework.http.converter.AbstractGenericHttpMessageConverter.write(AbstractGenericHttpMessageConverter.java:104)

at org.springframework.web.servlet.mvc.method.annotation.AbstractMessageConverterMethodProcessor.writeWithMessageConverters(AbstractMessageConverterMethodProcessor.java:277)

at org.springframework.web.servlet.mvc.method.annotation.RequestResponseBodyMethodProcessor.handleReturnValue(RequestResponseBodyMethodProcessor.java:181)

at org.springframework.web.method.support.HandlerMethodReturnValueHandlerComposite.handleReturnValue(HandlerMethodReturnValueHandlerComposite.java:82)

at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:123)

at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:878)

at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:792)

at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)

at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)

at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)

at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)

at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)

at javax.servlet.http.HttpServlet.service(HttpServlet.java:652)

at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)

at javax.servlet.http.HttpServlet.service(HttpServlet.java:733)

at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:227)

at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

at org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:53)

at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189)

at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)

at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)

at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189)

at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:202)

at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:97)

at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:542)

at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:143)

at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:92)

at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:78)

at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:357)

at org.apache.coyote.http11.Http11Processor.service(Http11Processor.java:374)

at org.apache.coyote.AbstractProcessorLight.process(AbstractProcessorLight.java:65)

at org.apache.coyote.AbstractProtocol$ConnectionHandler.process(AbstractProtocol.java:893)

at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1707)

at org.apache.tomcat.util.net.SocketProcessorBase.run(SocketProcessorBase.java:49)

at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)

at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)

at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)

at java.base/java.lang.Thread.run(Thread.java:834)

Caused by: java.io.IOException: Connection reset by peer

at java.base/sun.nio.ch.FileDispatcherImpl.write0(Native Method)

at java.base/sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:47)

at java.base/sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:113)

at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:79)

at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:50)

at java.base/sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:466)

at org.apache.tomcat.util.net.NioChannel.write(NioChannel.java:135)

at org.apache.tomcat.util.net.NioBlockingSelector.write(NioBlockingSelector.java:118)

at org.apache.tomcat.util.net.NioSelectorPool.write(NioSelectorPool.java:151)

at org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper.doWrite(NioEndpoint.java:1367)

at org.apache.tomcat.util.net.SocketWrapperBase.doWrite(SocketWrapperBase.java:766)

at org.apache.tomcat.util.net.SocketWrapperBase.writeBlocking(SocketWrapperBase.java:586)

at org.apache.tomcat.util.net.SocketWrapperBase.write(SocketWrapperBase.java:530)

at org.apache.coyote.http11.Http11OutputBuffer$SocketOutputBuffer.doWrite(Http11OutputBuffer.java:546)

at org.apache.coyote.http11.filters.IdentityOutputFilter.doWrite(IdentityOutputFilter.java:84)

at org.apache.coyote.http11.Http11OutputBuffer.doWrite(Http11OutputBuffer.java:193)

at org.apache.coyote.Response.doWrite(Response.java:606)

at org.apache.catalina.connector.OutputBuffer.realWriteBytes(OutputBuffer.java:340)

... 73 common frames omitted

+

Checking the nginx logs

proxy_temp permission denied — so there was the problem: nginx did not have permission to create the proxy_temp files

+

Cause: each request gets a page-sized buffer (8 KB/16 KB). If the response exceeds one page, nginx spills the rest into a proxy_temp file; because it had no permission to create that file, the request was simply cut off

+

+

Fix

What we did:
fix the ownership/permissions so that the user and group nginx runs as can create the proxy_temp files.

+

Alternative:
if you would rather not have nginx write temp files at all (say you are worried about the performance cost), you can tune the directives below instead (a hypothetical config sketch follows the quoted docs):

+
    +
  • proxy_max_temp_file_size
  • +
  • proxy_buffer_size
  • +
  • proxy_buffers
  • +
+
1
When buffering is enabled, nginx receives a response from the proxied server as soon as possible, saving it into the buffers set by the proxy_buffer_size and proxy_buffers directives. If the whole response does not fit into memory, a part of it can be saved to a temporary file on the disk. Writing to temporary files is controlled by the proxy_max_temp_file_size and proxy_temp_file_write_size directives.
+ + + +
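For illustration only, a hypothetical location block combining these directives could look like this (the directive names are real nginx directives; the upstream name and sizes are made up, not what we actually deployed):
location /api/ {
    proxy_pass http://java_backend;       # hypothetical upstream
    proxy_buffer_size 16k;                # buffer for the first part of the response
    proxy_buffers 8 16k;                  # per-request in-memory buffers
    proxy_max_temp_file_size 0;           # 0 disables spilling to proxy_temp files entirely
}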

After going live:

+

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/09/21/java-\344\270\232\345\212\241oom\346\216\222\346\237\245/index.html" "b/2023/09/21/java-\344\270\232\345\212\241oom\346\216\222\346\237\245/index.html" new file mode 100644 index 0000000000..f9c0c2a44f --- /dev/null +++ "b/2023/09/21/java-\344\270\232\345\212\241oom\346\216\222\346\237\245/index.html" @@ -0,0 +1,458 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java 业务oom排查 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java: troubleshooting a business-service OOM +

+ + +
+ + + + +
+ + +

Background

    +
  • JDK version: JDK 11
  • +
+

The service used to call an internal PHP product API and was switched to a Java product API, but the Java code mishandled a lot of edge cases, which led to an OOM

+

Symptoms

CPU spiked to 100%, memory spiked to 100%, and then the process simply died

+

+

Investigation

Starting the investigation

The logs showed an OOM

+

At first there was no heap dump, so the dump flags were added:

+
1
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/admin/logs/jvmlogs/java.hprof
+ +

Analyzing the dump showed one ~400 MB and one ~100 MB object; with a heap of only 1 GB these two alone already took up more than 50%

+

+

The root cause turned out to be a bad SQL query that pulled the whole table into memory

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/09/22/java-oom-hprof\346\226\207\344\273\266\347\224\237\346\210\220\346\227\266\346\234\272/index.html" "b/2023/09/22/java-oom-hprof\346\226\207\344\273\266\347\224\237\346\210\220\346\227\266\346\234\272/index.html" new file mode 100644 index 0000000000..c401abccc8 --- /dev/null +++ "b/2023/09/22/java-oom-hprof\346\226\207\344\273\266\347\224\237\346\210\220\346\227\266\346\234\272/index.html" @@ -0,0 +1,465 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java oom hprof文件生成时机 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java oom: when the hprof file is generated +

+ + +
+ + + + +
+ + +

Background

1. Once, while troubleshooting an OOM, the configured dump directory did not exist and no hprof dump file was produced after the OOM.
2. If the OOM is caught by a try/catch, the hprof dump file is still produced — so the file is not written when the process exits, it is written at the moment the OutOfMemoryError is created.

+

Code

java -Xms50m -Xmx50m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/admin/logs/jvmlogs/java.hprof Main.java

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
import java.util.ArrayList;
import java.util.List;

public class Main {
public static void main(String[] args) {
List<Object> tem = new ArrayList<>();

boolean test = true;
try {
while (test) {
tem.add(new int[10000000]);
}
}catch (Throwable table){
System.out.println("oom test");
}
System.out.println("afasdfadsf");
}
}
+ +

Backtrace

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
Thread 2 "java" hit Breakpoint 1, HeapDumper::dump (this=this@entry=0x7ffff7bfe090, path=path@entry=0x7ffff0803c20 "/home/ubuntu/fasdfd/fadf", out=0x7ffff0000b80, compression=0, overwrite=overwrite@entry=false, num_dump_threads=num_dump_threads@entry=1) at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2383
2383 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
(gdb) bt
#0 HeapDumper::dump (this=this@entry=0x7ffff7bfe090, path=path@entry=0x7ffff0803c20 "/home/ubuntu/fasdfd/fadf", out=0x7ffff0000b80, compression=0, overwrite=overwrite@entry=false, num_dump_threads=num_dump_threads@entry=1)
at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2383
#1 0x00007ffff65473a8 in HeapDumper::dump_heap (oome=oome@entry=true) at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2573
#2 0x00007ffff654750e in HeapDumper::dump_heap_from_oome () at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2487
#3 0x00007ffff61e9c78 in report_java_out_of_memory (message=message@entry=0x7ffff75a0d5e "Java heap space") at /home/ubuntu/jdk/src/hotspot/share/utilities/debug.cpp:356
#4 0x00007ffff6c3760d in MemAllocator::Allocation::check_out_of_memory (this=this@entry=0x7ffff7bfe1b0) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:126
#5 0x00007ffff6c3aac6 in MemAllocator::Allocation::~Allocation (this=0x7ffff7bfe1b0, __in_chrg=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:83
#6 MemAllocator::allocate (this=this@entry=0x7ffff7bfe280) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:375
#7 0x00007ffff72658e7 in CollectedHeap::array_allocate (__the_thread__=0x7ffff0029850, do_zero=true, length=10000000, size=<optimized out>, klass=0x100040d90, this=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/collectedHeap.inline.hpp:41
#8 TypeArrayKlass::allocate_common (this=this@entry=0x100040d90, length=length@entry=10000000, do_zero=do_zero@entry=true, __the_thread__=__the_thread__@entry=0x7ffff0029850) at /home/ubuntu/jdk/src/hotspot/share/oops/typeArrayKlass.cpp:93
#9 0x00007ffff6d861d9 in TypeArrayKlass::allocate (__the_thread__=0x7ffff0029850, length=10000000, this=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/oops/typeArrayKlass.hpp:68
#10 oopFactory::new_typeArray (type=type@entry=T_INT, length=length@entry=10000000, __the_thread__=__the_thread__@entry=0x7ffff0029850) at /home/ubuntu/jdk/src/hotspot/share/memory/oopFactory.cpp:93
#11 0x00007ffff662b51a in InterpreterRuntime::newarray (current=0x7ffff0029850, type=T_INT, size=10000000) at /home/ubuntu/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:248
#12 0x00007fffe856be1a in ?? ()
#13 0x00007fffe856bd91 in ?? ()
#14 0x00000000fcf98230 in ?? ()
#15 0x00007ffff7bfe3e0 in ?? ()
#16 0x00007fffc9014349 in ?? ()
#17 0x00007ffff7bfe450 in ?? ()
#18 0x00007fffc9014408 in ?? ()
#19 0x0000000000000000 in ?? ()
+ + +

The core function is

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
void report_java_out_of_memory(const char* message) {
static int out_of_memory_reported = 0;

if (Atomic::cmpxchg(&out_of_memory_reported, 0, 1) == 0) {
// create heap dump before OnOutOfMemoryError commands are executed
if (HeapDumpOnOutOfMemoryError) {
tty->print_cr("java.lang.OutOfMemoryError: %s", message);
HeapDumper::dump_heap_from_oome(); // writes the hprof file
}

if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
VMError::report_java_out_of_memory(message); // report the error (runs the -XX:OnOutOfMemoryError commands)
}

if (CrashOnOutOfMemoryError) {
tty->print_cr("Aborting due to java.lang.OutOfMemoryError: %s", message);
report_fatal(OOM_JAVA_HEAP_FATAL, __FILE__, __LINE__, "OutOfMemory encountered: %s", message); // this is the crash path
}

if (ExitOnOutOfMemoryError) {
tty->print_cr("Terminating due to java.lang.OutOfMemoryError: %s", message);
os::_exit(3); // quick exit with no cleanup hooks run
}
}
}
+ + +

How the dump file is opened

1
2
3
4
5
6
7
8
9
10
11
char const* FileWriter::open_writer() {
assert(_fd < 0, "Must not already be open");

_fd = os::create_binary_file(_path, _overwrite);

if (_fd < 0) {
return os::strerror(errno);
}

return NULL;
}
+

At the bottom it calls the Linux library function open64

+
1
2
3
4
5
6
7
// jdk/src/hotspot/os/linux/os_linux.cpp
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
return ::open64(path, oflags, S_IREAD | S_IWRITE);
}
+ +

The dump directory must already exist — the JVM neither checks for it nor creates it

+
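The same behaviour is easy to see from plain Java (a toy sketch, unrelated to the JVM code above): creating a file never creates its missing parent directories, so the open fails with ENOENT and, for the heap dump, the JVM simply ends up writing nothing.
import java.io.FileOutputStream;

public class DumpDirMustExist {
    public static void main(String[] args) throws Exception {
        // /no/such/dir does not exist, so this throws FileNotFoundException
        // ("No such file or directory") instead of creating the directory,
        // just like open64(O_WRONLY | O_CREAT) does for the hprof file.
        try (FileOutputStream out = new FileOutputStream("/no/such/dir/java.hprof")) {
            out.write(0);
        }
    }
}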

When the hprof file and the exception are generated

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
bool MemAllocator::Allocation::check_out_of_memory() {
JavaThread* THREAD = _thread; // For exception macros.
assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

if (obj() != NULL) {
return false;
}

const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
if (!_thread->in_retryable_allocation()) {
// -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
report_java_out_of_memory(message); ////////////// writes the hprof file — this is the function shown above

if (JvmtiExport::should_post_resource_exhausted()) {
JvmtiExport::post_resource_exhausted(
JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
message);
}
oop exception = _overhead_limit_exceeded ?
Universe::out_of_memory_error_gc_overhead_limit() : // OOM caused by exceeding the GC overhead limit
Universe::out_of_memory_error_java_heap(); // the usual "Java heap space" OOM
THROW_OOP_(exception, true);
} else {
THROW_OOP_(Universe::out_of_memory_error_retry(), true);
}
}
+ +

So the dump file is written first, and only then is the OutOfMemoryError thrown

+

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/10/16/clickhouse-cloud-dbeaver\350\277\236\346\216\245\344\270\215\344\270\212/index.html" "b/2023/10/16/clickhouse-cloud-dbeaver\350\277\236\346\216\245\344\270\215\344\270\212/index.html" new file mode 100644 index 0000000000..74902f42af --- /dev/null +++ "b/2023/10/16/clickhouse-cloud-dbeaver\350\277\236\346\216\245\344\270\215\344\270\212/index.html" @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clickhouse cloud dbeaver连接不上 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ clickhouse cloud: DBeaver cannot connect +

+ + +
+ + + + +
+ + +

Background

I could not connect to ClickHouse Cloud from DBeaver

+

The links below eventually pointed to the answer

+

You have to use the HTTPS port, 8443, rather than the default 8123

+

Also, the database field is required — leaving it empty makes the connection fail as well

+

Notes

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/10/16/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276\345\222\214\344\275\215\345\233\276/index.html" "b/2023/10/16/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276\345\222\214\344\275\215\345\233\276/index.html" new file mode 100644 index 0000000000..53d97ea54f --- /dev/null +++ "b/2023/10/16/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276\345\222\214\344\275\215\345\233\276/index.html" @@ -0,0 +1,447 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clickhouse 物化视图和位图 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ clickhouse materialized views and bitmaps +

+ + +
+ + + + +
+ + +

Background

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/18/java-mybatis-plus-date-handler/index.html b/2023/10/18/java-mybatis-plus-date-handler/index.html new file mode 100644 index 0000000000..1d2d50552b --- /dev/null +++ b/2023/10/18/java-mybatis-plus-date-handler/index.html @@ -0,0 +1,442 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java mybatis-plus date handler | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java mybatis-plus date handler +

+ + +
+ + + + +
+ + +

Background

Stack trace:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
setNonNullParameter:33, DateTypeHandler (org.apache.ibatis.type)
setNonNullParameter:28, DateTypeHandler (org.apache.ibatis.type)
setParameter:73, BaseTypeHandler (org.apache.ibatis.type)
setNonNullParameter:67, UnknownTypeHandler (org.apache.ibatis.type)
setParameter:73, BaseTypeHandler (org.apache.ibatis.type)
setParameters:232, MybatisParameterHandler (com.baomidou.mybatisplus.core)
parameterize:94, PreparedStatementHandler (org.apache.ibatis.executor.statement)
parameterize:64, RoutingStatementHandler (org.apache.ibatis.executor.statement)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invoke:64, Plugin (org.apache.ibatis.plugin)
parameterize:-1, $Proxy255 (com.sun.proxy)
prepareStatement:88, SimpleExecutor (org.apache.ibatis.executor)
doQuery:62, SimpleExecutor (org.apache.ibatis.executor)
queryFromDatabase:325, BaseExecutor (org.apache.ibatis.executor)
query:156, BaseExecutor (org.apache.ibatis.executor)
query:109, CachingExecutor (org.apache.ibatis.executor)
intercept:81, MybatisPlusInterceptor (com.baomidou.mybatisplus.extension.plugins)
invoke:62, Plugin (org.apache.ibatis.plugin)
query:-1, $Proxy254 (com.sun.proxy)
selectList:151, DefaultSqlSession (org.apache.ibatis.session.defaults)
selectList:145, DefaultSqlSession (org.apache.ibatis.session.defaults)
selectList:140, DefaultSqlSession (org.apache.ibatis.session.defaults)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invoke:427, SqlSessionTemplate$SqlSessionInterceptor (org.mybatis.spring)
selectList:-1, $Proxy146 (com.sun.proxy)
selectList:224, SqlSessionTemplate (org.mybatis.spring)
executeForMany:166, MybatisMapperMethod (com.baomidou.mybatisplus.core.override)
execute:77, MybatisMapperMethod (com.baomidou.mybatisplus.core.override)
invoke:148, MybatisMapperProxy$PlainMethodInvoker (com.baomidou.mybatisplus.core.override)
invoke:89, MybatisMapperProxy (com.baomidou.mybatisplus.core.override)
getUserAndSkuByDay:-1, $Proxy215 (com.sun.proxy)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invokeJoinpointUsingReflection:344, AopUtils (org.springframework.aop.support)
invokeJoinpoint:198, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
invoke:50, DynamicDataSourceAnnotationInterceptor (com.baomidou.dynamic.datasource.aop)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
invoke:212, JdkDynamicAopProxy (org.springframework.aop.framework)
getUserAndSkuByDay:-1, $Proxy216 (com.sun.proxy)
testQuery:23, CdpUserBehaviorDataMapperTest (com.patpat.mms.mdp.base.core.service.mapper.cdp)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
runLeaf:366, ParentRunner (org.junit.runners)
runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:232, JUnitStarter (com.intellij.rt.junit)
main:55, JUnitStarter (com.intellij.rt.junit)
+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/19/lsmtree/index.html b/2023/10/19/lsmtree/index.html new file mode 100644 index 0000000000..4ba7e03390 --- /dev/null +++ b/2023/10/19/lsmtree/index.html @@ -0,0 +1,445 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lsmtree | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/20/simpleDatetimeformatter-vs-datetimeformatter/index.html b/2023/10/20/simpleDatetimeformatter-vs-datetimeformatter/index.html new file mode 100644 index 0000000000..e7f26f61f5 --- /dev/null +++ b/2023/10/20/simpleDatetimeformatter-vs-datetimeformatter/index.html @@ -0,0 +1,446 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + simpleDatetimeformatter vs datetimeformatter | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ simpleDatetimeformatter vs datetimeformatter +

+ + +
+ + + + +
+ + +

SimpleDateFormat is not thread-safe because the formatter holds a mutable object (its internal Calendar) that is modified by every thread that uses it

+

DateTimeFormatter is thread-safe because it is immutable; an object that never changes cannot have thread-safety problems no matter how many threads share it

+
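A minimal sketch of the difference (my own example, not from any library docs): one shared DateTimeFormatter can safely be used from many threads, while a shared SimpleDateFormat must not be.
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.stream.IntStream;

public class FormatterThreadSafety {
    // Immutable, so one shared instance is safe across threads.
    private static final DateTimeFormatter DTF = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    public static void main(String[] args) {
        IntStream.range(0, 1000).parallel()
                 .forEach(i -> DTF.format(LocalDateTime.now()));   // no shared state is mutated
        System.out.println("done");
        // Sharing a single java.text.SimpleDateFormat the same way is unsafe: every
        // format/parse call mutates its internal Calendar, so concurrent use can throw
        // exceptions or silently return wrong dates. Use one instance per thread
        // (e.g. ThreadLocal) or switch to DateTimeFormatter.
    }
}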

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/25/bm25-and-search/index.html b/2023/10/25/bm25-and-search/index.html new file mode 100644 index 0000000000..4c0d29f4dd --- /dev/null +++ b/2023/10/25/bm25-and-search/index.html @@ -0,0 +1,443 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + bm25 and search | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ bm25 and search +

+ + +
+ + + + +
+ + +

Background

Search usually needs ranking — ordering the matching documents — and there are many scoring models for this. One of them is BM25

+

In Lucene the implementing class is
lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java

+
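For reference, the usual form of the BM25 score is given below (N = number of documents, n_t = documents containing term t, f(t,D) = term frequency in D, |D| = document length, avgdl = average document length; Lucene's defaults are k1 = 1.2 and b = 0.75, and since Lucene 8 the implementation folds idf and boost into a precomputed weight and drops the constant (k1+1) numerator factor because it does not change the ranking):
score(D, Q) = \sum_{t \in Q} IDF(t) \cdot \frac{f(t, D)}{f(t, D) + k_1 \left(1 - b + b \cdot \frac{|D|}{avgdl}\right)}

IDF(t) = \ln\!\left(1 + \frac{N - n_t + 0.5}{n_t + 0.5}\right)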

Further reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/25/fst/index.html b/2023/10/25/fst/index.html new file mode 100644 index 0000000000..3849515961 --- /dev/null +++ b/2023/10/25/fst/index.html @@ -0,0 +1,554 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + fst | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ fst +

+ + +
+ + + + +
+ + +

Background

FST stands for finite state transducer; Lucene compresses and stores a lot of its data in this format.

+

FST example

Before introducing FSTs, first look at HashMap.

+

HashMap semantics: key -> value — you put in a key and get back a value

+

An FST is also a special kind of map, with essentially the same semantics: FST(key) = value

+

Example

Take the following array of terms: [cat:5, dog:7, dogs:13] (a build sketch using Lucene's FST API follows the list below)

+
    +
  • key "cat", value 5
  • +
  • key "dog", value 7
  • +
  • key "dogs", value 13
  • +
+
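Roughly how the same three keys can be built and queried with Lucene's FST API — a sketch only: the builder class and method names have moved around between Lucene versions (older releases use Builder/finish(), the 9.x line uses FSTCompiler/compile()), so adjust to your version:
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.FSTCompiler;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class FstDemo {
    public static void main(String[] args) throws Exception {
        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        FSTCompiler<Long> compiler = new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE1, outputs).build();
        IntsRefBuilder scratch = new IntsRefBuilder();
        // inputs must be added in sorted order: cat < dog < dogs
        compiler.add(Util.toIntsRef(new BytesRef("cat"), scratch), 5L);
        compiler.add(Util.toIntsRef(new BytesRef("dog"), scratch), 7L);
        compiler.add(Util.toIntsRef(new BytesRef("dogs"), scratch), 13L);
        FST<Long> fst = compiler.compile();

        System.out.println(Util.get(fst, new BytesRef("cat")));   // 5
        System.out.println(Util.get(fst, new BytesRef("dog")));   // 7
        System.out.println(Util.get(fst, new BytesRef("dogs")));  // 13
    }
}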

It ends up serialized into this structure:

+
1
[0, 116, 15, 97, 6, 5, 115, 31, 103, 7, 111, 6, 7, 100, 22, 4, 5, 99, 16]
+ + +

Now let's walk through the bytes of this example.

+ + + + + + + + + + + + + + + +
103, 7 | flag=7, value 'g' (103), target=7, nextArc=7
111, 6 | flag=6, value 'o' (111), target=9, nextArc=9
7, 100, 22 | flag=22, value 'd' (100), output=7, target=11 (why 11? because pos=11 sits right before the 7, so nextArc=11)
4, 5, 99, 16 | flag=16, value 'c' (99), output=5, target=4, nextArc=14
+
1
2
3
[0, 116, 15,| 97, 6,  |5, 115, 31, |103, 7,  |111, 6,  |7, 100, 22,| 4, 5, 99, 16]
--------| ------- | -----------| ------ | ------- |--------- | -------------
(t,null)| (a,null)| (s,5) | (g,null)| (o,null)| (d:7) | (output =5,target=4,flag=16 value:'c')
+ +

The constants:

+ + + + + + + + + + + + + + + + + + + + + + + +
Constant | Value | Description
BIT_LAST_ARC | 1<<1 | marks this arc as the last outgoing arc of its node — roughly like the right child of a binary tree, or the third child of a ternary tree
BIT_ARC_HAS_OUTPUT | 1<<4 | the arc carries an output, i.e. part of the value is stored on it
BIT_TARGET_NEXT | 1<<2 | the target node is stored immediately after this arc, so no separate target pointer is needed — the two ends of the arc sit next to each other
+

The Arc class

The Arc class source is:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
  public static final class Arc<T> {

// *** Arc fields.

private int label;

private T output;

private long target;

private byte flags;

private T nextFinalOutput;

private long nextArc;

private byte nodeFlags;

// *** Fields for arcs belonging to a node with fixed length arcs.
// So only valid when bytesPerArc != 0.
// nodeFlags == ARCS_FOR_BINARY_SEARCH || nodeFlags == ARCS_FOR_DIRECT_ADDRESSING.

private int bytesPerArc;

private long posArcsStart;

private int arcIdx;

private int numArcs;

// *** Fields for a direct addressing node. nodeFlags == ARCS_FOR_DIRECT_ADDRESSING.

/**
* Start position in the {@link FST.BytesReader} of the presence bits for a direct addressing
* node, aka the bit-table
*/
private long bitTableStart;

/** First label of a direct addressing node. */
private int firstLabel;

/**
* Index of the current label of a direct addressing node. While {@link #arcIdx} is the current
* index in the label range, {@link #presenceIndex} is its corresponding index in the list of
* actually present labels. It is equal to the number of bits set before the bit at {@link
* #arcIdx} in the bit-table. This field is a cache to avoid to count bits set repeatedly when
* iterating the next arcs.
*/
private int presenceIndex;
}
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Field | Description |
| --- | --- |
| label | In map key/value terms, a label is one piece of the key; several labels make up a key. For example, "cat" is split into three labels: 'c', 'a', 't'. |
| output | In map key/value terms, an output is one piece of the value; several outputs combine into a value. |
| target | The offset of the destination node: for an arc src -> dst, arr[target] is where the dst node is stored. |
| flags | A bit set of assorted flags describing the state of this arc, packed into one byte. |
| nextFinalOutput | As described above, if this arc ends a key, this holds the last piece of the value; otherwise it is null. |
| nextArc | When a node has several arcs (like sibling nodes in a multi-way tree), this is the offset of the next sibling arc. |
| numArcs | How many arcs this node has, i.e. how many children the node has. |
+

Write path (how the FST is built during a segment flush); the second trace below shows the read path (a term lookup walking the FST):

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
add:473, FSTCompiler (org.apache.lucene.util.fst)
compileIndex:504, Lucene90BlockTreeTermsWriter$PendingBlock (org.apache.lucene.codecs.lucene90.blocktree)
writeBlocks:725, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
finish:1105, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
write:370, Lucene90BlockTreeTermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
write:172, PerFieldPostingsFormat$FieldsWriter (org.apache.lucene.codecs.perfield)
flush:135, FreqProxTermsWriter (org.apache.lucene.index)
flush:310, IndexingChain (org.apache.lucene.index)
flush:392, DocumentsWriterPerThread (org.apache.lucene.index)
doFlush:492, DocumentsWriter (org.apache.lucene.index)
flushAllThreads:671, DocumentsWriter (org.apache.lucene.index)
doFlush:4194, IndexWriter (org.apache.lucene.index)
flush:4168, IndexWriter (org.apache.lucene.index)
shutdown:1322, IndexWriter (org.apache.lucene.index)
close:1362, IndexWriter (org.apache.lucene.index)
doTestSearch:133, FstTest (com.dinosaur.lucene.demo)
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
findTargetArc:1418, FST (org.apache.lucene.util.fst)
seekExact:511, SegmentTermsEnum (org.apache.lucene.codecs.lucene90.blocktree)
loadTermsEnum:111, TermStates (org.apache.lucene.index)
build:96, TermStates (org.apache.lucene.index)
createWeight:227, TermQuery (org.apache.lucene.search)
createWeight:904, IndexSearcher (org.apache.lucene.search)
search:687, IndexSearcher (org.apache.lucene.search)
searchAfter:523, IndexSearcher (org.apache.lucene.search)
search:538, IndexSearcher (org.apache.lucene.search)
doPagingSearch:158, SearchFiles (com.dinosaur.lucene.demo)
testSearch:128, SearchFiles (com.dinosaur.lucene.demo)

+ + +

Seeking

How seeking proceeds (decoding term metadata to locate the postings):

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
public void decodeMetaData() throws IOException {

// if (DEBUG) System.out.println("\nBTTR.decodeMetadata seg=" + segment + " mdUpto=" +
// metaDataUpto + " vs termBlockOrd=" + state.termBlockOrd);

// lazily catch up on metadata decode:
final int limit = getTermBlockOrd();
boolean absolute = metaDataUpto == 0;
assert limit > 0;

// TODO: better API would be "jump straight to term=N"???
while (metaDataUpto < limit) {

// TODO: we could make "tiers" of metadata, ie,
// decode docFreq/totalTF but don't decode postings
// metadata; this way caller could get
// docFreq/totalTF w/o paying decode cost for
// postings

// TODO: if docFreq were bulk decoded we could
// just skipN here:
if (statsSingletonRunLength > 0) {
state.docFreq = 1;
state.totalTermFreq = 1;
statsSingletonRunLength--;
} else {
int token = statsReader.readVInt();
if ((token & 1) == 1) {
state.docFreq = 1;
state.totalTermFreq = 1;
statsSingletonRunLength = token >>> 1;
} else {
state.docFreq = token >>> 1;
if (ste.fr.fieldInfo.getIndexOptions() == IndexOptions.DOCS) {
state.totalTermFreq = state.docFreq;
} else {
state.totalTermFreq = state.docFreq + statsReader.readVLong();
}
}
}

// metadata
ste.fr.parent.postingsReader.decodeTerm(bytesReader, ste.fr.fieldInfo, state, absolute);

metaDataUpto++;
absolute = false;
}
state.termBlockOrd = metaDataUpto;
}
+ + + + + + +

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/2023/10/25/java-wait-notify/index.html b/2023/10/25/java-wait-notify/index.html new file mode 100644 index 0000000000..a8548805bb --- /dev/null +++ b/2023/10/25/java-wait-notify/index.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + java-wait-notify | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ java-wait-notify +

+ + +
+ + + + +
+ + +

Background

Inter-thread communication in Java occasionally uses wait and notify.

+
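Before diving into the JVM internals, here is a minimal, hypothetical Java example of the pattern this post is about: the waiter must hold the monitor, check its condition in a loop, and call wait(); the notifier changes the condition and calls notify() under the same monitor.

```java
public class WaitNotifyDemo {
    private final Object lock = new Object();
    private boolean ready = false;

    public void waitForSignal() throws InterruptedException {
        synchronized (lock) {
            while (!ready) {          // loop guards against spurious wakeups
                lock.wait();          // releases the monitor and parks the thread
            }
        }
    }

    public void signal() {
        synchronized (lock) {
            ready = true;
            lock.notify();            // wakes one waiter; it re-acquires the monitor
        }
    }

    public static void main(String[] args) throws Exception {
        WaitNotifyDemo demo = new WaitNotifyDemo();
        Thread waiter = new Thread(() -> {
            try {
                demo.waitForSignal();
                System.out.println("got signal");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        waiter.start();
        Thread.sleep(100);            // give the waiter time to park
        demo.signal();
        waiter.join();
    }
}
```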

Implementation

Registration of Object's native methods:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
// Register native methods of Object
void java_lang_Object::register_natives(TRAPS) {
InstanceKlass* obj = vmClasses::Object_klass();
Method::register_native(obj, vmSymbols::hashCode_name(),
vmSymbols::void_int_signature(), (address) &JVM_IHashCode, CHECK);
Method::register_native(obj, vmSymbols::wait_name(),
vmSymbols::long_void_signature(), (address) &JVM_MonitorWait, CHECK);
Method::register_native(obj, vmSymbols::notify_name(),
vmSymbols::void_method_signature(), (address) &JVM_MonitorNotify, CHECK);
Method::register_native(obj, vmSymbols::notifyAll_name(),
vmSymbols::void_method_signature(), (address) &JVM_MonitorNotifyAll, CHECK);
Method::register_native(obj, vmSymbols::clone_name(),
vmSymbols::void_object_signature(), (address) &JVM_Clone, THREAD);
}
+ +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
JavaThread* current = THREAD;

assert(InitDone, "Unexpectedly not initialized");

CHECK_OWNER(); // Throws IMSE if not owner.

EventJavaMonitorWait event;

// check for a pending interrupt
if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
// Note: 'false' parameter is passed here because the
// wait was not timed out due to thread interrupt.
JvmtiExport::post_monitor_waited(current, this, false);

// In this short circuit of the monitor wait protocol, the
// current thread never drops ownership of the monitor and
// never gets added to the wait queue so the current thread
// cannot be made the successor. This means that the
// JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
// consume an unpark() meant for the ParkEvent associated with
// this ObjectMonitor.
}
if (event.should_commit()) {
post_monitor_wait_event(&event, this, 0, millis, false);
}
THROW(vmSymbols::java_lang_InterruptedException());
return;
}

assert(current->_Stalled == 0, "invariant");
current->_Stalled = intptr_t(this);
current->set_current_waiting_monitor(this);

// create a node to be put into the queue
// Critically, after we reset() the event but prior to park(), we must check
// for a pending interrupt.
ObjectWaiter node(current);
node.TState = ObjectWaiter::TS_WAIT;
current->_ParkEvent->reset();
OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag

// Enter the waiting queue, which is a circular doubly linked list in this case
// but it could be a priority queue or any data structure.
// _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
// by the the owner of the monitor *except* in the case where park()
// returns because of a timeout of interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.

Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
AddWaiter(&node);
Thread::SpinRelease(&_WaitSetLock);

_Responsible = NULL;

intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
_recursions = 0; // set the recursion level to be 1
exit(current); // exit the monitor
guarantee(owner_raw() != current, "invariant");

// The thread is on the WaitSet list - now park() it.
// On MP systems it's conceivable that a brief spin before we park
// could be profitable.
//
// TODO-FIXME: change the following logic to a loop of the form
// while (!timeout && !interrupted && _notified == 0) park()

int ret = OS_OK;
int WasNotified = 0;

// Need to check interrupt state whilst still _thread_in_vm
bool interrupted = interruptible && current->is_interrupted(false);

{ // State transition wrappers
OSThread* osthread = current->osthread();
OSThreadWaitState osts(osthread, true);

assert(current->thread_state() == _thread_in_vm, "invariant");

{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
} else if (node._notified == 0) {
if (millis <= 0) {
current->_ParkEvent->park();
} else {
ret = current->_ParkEvent->park(millis);
}
}
}

// Node may be on the WaitSet, the EntryList (or cxq), or in transition
// from the WaitSet to the EntryList.
// See if we need to remove Node from the WaitSet.
// We use double-checked locking to avoid grabbing _WaitSetLock
// if the thread is not on the wait queue.
//
// Note that we don't need a fence before the fetch of TState.
// In the worst case we'll fetch a old-stale value of TS_WAIT previously
// written by the is thread. (perhaps the fetch might even be satisfied
// by a look-aside into the processor's own store buffer, although given
// the length of the code path between the prior ST and this load that's
// highly unlikely). If the following LD fetches a stale TS_WAIT value
// then we'll acquire the lock and then re-fetch a fresh TState value.
// That is, we fail toward safety.

if (node.TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
if (node.TState == ObjectWaiter::TS_WAIT) {
DequeueSpecificWaiter(&node); // unlink from WaitSet
assert(node._notified == 0, "invariant");
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_WaitSetLock);
}

// The thread is now either on off-list (TS_RUN),
// on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
// The Node's TState variable is stable from the perspective of this thread.
// No other threads will asynchronously modify TState.
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
OrderAccess::loadload();
if (_succ == current) _succ = NULL;
WasNotified = node._notified;

// Reentry phase -- reacquire the monitor.
// re-enter contended monitor after object.wait().
// retain OBJECT_WAIT state until re-enter successfully completes
// Thread state is thread_in_vm and oop access is again safe,
// although the raw address of the object may have changed.
// (Don't cache naked oops over safepoints, of course).

// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(current, this, ret == OS_TIMEOUT);

if (node._notified != 0 && _succ == current) {
// In this part of the monitor wait-notify-reenter protocol it
// is possible (and normal) for another thread to do a fastpath
// monitor enter-exit while this thread is still trying to get
// to the reenter portion of the protocol.
//
// The ObjectMonitor was notified and the current thread is
// the successor which also means that an unpark() has already
// been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
// consume the unpark() that was done when the successor was
// set because the same ParkEvent is shared between Java
// monitors and JVM/TI RawMonitors (for now).
//
// We redo the unpark() to ensure forward progress, i.e., we
// don't want all pending threads hanging (parked) with none
// entering the unlocked monitor.
node._event->unpark();
}
}

if (event.should_commit()) {
post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
}

OrderAccess::fence();

assert(current->_Stalled != 0, "invariant");
current->_Stalled = 0;

assert(owner_raw() != current, "invariant");
ObjectWaiter::TStates v = node.TState;
if (v == ObjectWaiter::TS_RUN) {
enter(current);
} else {
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
ReenterI(current, &node);
node.wait_reenter_end(this);
}

// current has reacquired the lock.
// Lifecycle - the node representing current must not appear on any queues.
// Node is about to go out-of-scope, but even if it were immortal we wouldn't
// want residual elements associated with this thread left on any lists.
guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
} // OSThreadWaitState()

current->set_current_waiting_monitor(NULL);

guarantee(_recursions == 0, "invariant");
_recursions = save // restore the old recursion count
+ JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current); // increased by the deferred relock count
_waiters--; // decrement the number of waiters

// Verify a few postconditions
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");

// check if the notification happened
if (!WasNotified) {
// no, it could be timeout or Thread.interrupt() or both
// check for interrupt event, otherwise it is timeout
if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
THROW(vmSymbols::java_lang_InterruptedException());
}
}

// NOTE: Spurious wake up will be consider as timeout.
// Monitor notify has precedence over thread interrupt.
}
+ +

wait:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
Thread 20 "Thread-0" hit Breakpoint 2, __pthread_cond_wait (cond=0x7ffff0510058, mutex=0x7ffff0510030) at forward.c:121
121 forward.c: No such file or directory.
(gdb) bt
#0 __pthread_cond_wait (cond=0x7ffff0510058, mutex=0x7ffff0510030) at forward.c:121
#1 0x00007ffff6c21713 in os::PlatformEvent::park (this=0x7ffff0510000) at /home/ubuntu/daixiao/jdk/src/hotspot/os/posix/os_posix.cpp:1484
#2 0x00007ffff6bd003c in ObjectMonitor::wait (this=0x7fffac0013b0, millis=0, interruptible=true, __the_thread__=0x7ffff050f5b0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1544
#3 0x00007ffff6e90188 in ObjectSynchronizer::wait (obj=..., millis=0, __the_thread__=0x7ffff050f5b0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/synchronizer.cpp:654
#4 0x00007ffff68298ae in JVM_MonitorWait (env=0x7ffff050f8a8, handle=0x7fffd0df77c0, ms=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:617
#5 0x00007fffe100f68b in ?? ()
#6 0x00000008f7c32db8 in ?? ()
#7 0x00007ffff050f5b0 in ?? ()
#8 0x00007fffd0df7760 in ?? ()
#9 0x00007fffd0df7748 in ?? ()
#10 0x0000000000000000 in ?? ()
+ + +

notify:

+
1
2
3
4
5
6
7
8
9
10
11
12
13
14
(gdb) bt
#0 __pthread_cond_signal (cond=0x7ffff04f0958) at forward.c:110
#1 0x00007ffff6c21c13 in os::PlatformEvent::unpark (this=0x7ffff04f0900) at /home/ubuntu/daixiao/jdk/src/hotspot/os/posix/os_posix.cpp:1590
#2 0x00007ffff6bcf654 in ObjectMonitor::ExitEpilog (this=0x7fffac0010b0, current=0x7ffff04ef410, Wakee=0x0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1350
#3 0x00007ffff6bcf57b in ObjectMonitor::exit (this=0x7fffac0010b0, current=0x7ffff04ef410, not_suspended=true) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1321
#4 0x00007ffff6bcfe8e in ObjectMonitor::wait (this=0x7fffac0010b0, millis=0, interruptible=true, __the_thread__=0x7ffff04ef410) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1515
#5 0x00007ffff6e90188 in ObjectSynchronizer::wait (obj=..., millis=0, __the_thread__=0x7ffff04ef410) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/synchronizer.cpp:654
#6 0x00007ffff68298ae in JVM_MonitorWait (env=0x7ffff04ef708, handle=0x7fffd0df77c0, ms=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:617
#7 0x00007fffe100f68b in ?? ()
#8 0x00000008f7c32db8 in ?? ()
#9 0x00007ffff04ef410 in ?? ()
#10 0x00007fffd0df7760 in ?? ()
#11 0x00007fffd0df7748 in ?? ()
#12 0x0000000000000000 in ?? ()
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
void PlatformEvent::park() {       // AKA "down()"
// Transitions for _event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _event to 0 before returning

// Invariant: Only the thread associated with the PlatformEvent
// may call park().
assert(_nParked == 0, "invariant");

int v;

// atomically decrement _event
for (;;) {
v = _event;
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");

if (v == 0) { // Do this the hard way by blocking ...
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_event < 0) {
// OS-level "spurious wakeups" are ignored
status = pthread_cond_wait(_cond, _mutex);
assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
status, "cond_wait");
}
--_nParked;

_event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee(_event >= 0, "invariant");
}
+ + +

demo

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int condition = 0;
int count = 0;
pthread_t thread_id;
int consume( void )
{
while( 1 )
{
pthread_mutex_lock( &mutex );
while( condition == 0 )
pthread_cond_wait( &cond, &mutex );
printf( "Consumed %d\n", count );
condition = 0;
pthread_cond_signal( &cond );
pthread_mutex_unlock( &mutex );
}

return( 0 );
}

void* produce( void * arg )
{
while( 1 )
{
pthread_mutex_lock( &mutex );
while( condition == 1 )
pthread_cond_wait( &cond, &mutex );
printf( "Produced %d\n", count++ );
condition = 1;
pthread_cond_signal( &cond );
pthread_mutex_unlock( &mutex );
}
return( 0 );
}

int main( void )
{
pthread_create( &thread_id, NULL, &produce, NULL );
return consume();
}
+ + +

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git "a/2023/10/27/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276/index.html" "b/2023/10/27/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276/index.html" new file mode 100644 index 0000000000..a670919c14 --- /dev/null +++ "b/2023/10/27/clickhouse-\347\211\251\345\214\226\350\247\206\345\233\276/index.html" @@ -0,0 +1,438 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + clickhouse 物化视图 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+ + + + + +
+

+ clickhouse 物化视图 +

+ + +
+ + + + +
+ + +
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
Thread 3 "HTTPHandler" hit Breakpoint 2, DB::buildPushingToViewsChain (storage=..., metadata_snapshot=..., context=..., query_ptr=..., no_destination=false, thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0, async_insert=false, live_view_header=...) at /ssd/ClickHouse/src/Processors/Transforms/buildPushingToViewsChain.cpp:307
307 if (lock == nullptr)
(gdb) bt
#0 DB::buildPushingToViewsChain (storage=..., metadata_snapshot=..., context=..., query_ptr=..., no_destination=false, thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0, async_insert=false, live_view_header=...)
at /ssd/ClickHouse/src/Processors/Transforms/buildPushingToViewsChain.cpp:307
#1 0x000000001cc8a4e0 in DB::InterpreterInsertQuery::buildSink (this=0x7ffe5a9e63f0, table=..., metadata_snapshot=..., thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0) at /ssd/ClickHouse/src/Interpreters/InterpreterInsertQuery.cpp:311
#2 0x000000001cc8cbdb in DB::InterpreterInsertQuery::execute (this=0x7ffe5a9e63f0) at /ssd/ClickHouse/src/Interpreters/InterpreterInsertQuery.cpp:554
#3 0x000000001d2d9d4a in DB::executeQueryImpl (begin=0x7ffe5aa40c00 "insert into push.site_sku_user_log_distributed (site_sku , site ,sku_id ,user_id , is_in_cart ) values \r\n( 'us_1' , 'us' , '1' , '2' , '1' )", '\245' <repeats 57 times>...,
end=0x7ffe5aa40c8f '\245' <repeats 113 times>, 'Z' <repeats 87 times>..., context=..., internal=false, stage=DB::QueryProcessingStage::Complete, istr=0x7ffe5a8fc020) at /ssd/ClickHouse/src/Interpreters/executeQuery.cpp:1096
#4 0x000000001d2dbdd1 in DB::executeQuery(DB::ReadBuffer&, DB::WriteBuffer&, bool, std::__1::shared_ptr<DB::Context>, std::__1::function<void (DB::QueryResultDetails const&)>, std::__1::optional<DB::FormatSettings> const&, std::__1::function<void (DB::IOutputFormat&)>) (istr=..., ostr=..., allow_into_outfile=false, context=..., set_result_details=..., output_format_settings=..., handle_exception_in_output_format=...) at /ssd/ClickHouse/src/Interpreters/executeQuery.cpp:1351
#5 0x000000001e7b8f91 in DB::HTTPHandler::processQuery (this=0x7ffe43e3ce80, request=..., params=..., response=..., used_output=..., query_scope=...) at /ssd/ClickHouse/src/Server/HTTPHandler.cpp:884
#6 0x000000001e7bbb67 in DB::HTTPHandler::handleRequest (this=0x7ffe43e3ce80, request=..., response=...) at /ssd/ClickHouse/src/Server/HTTPHandler.cpp:1078
#7 0x000000001e8644dc in DB::HTTPServerConnection::run (this=0x7ffe43e3cd40) at /ssd/ClickHouse/src/Server/HTTP/HTTPServerConnection.cpp:68
#8 0x000000002396f7d9 in Poco::Net::TCPServerConnection::start (this=0x7ffe43e3cd40) at /ssd/ClickHouse/base/poco/Net/src/TCPServerConnection.cpp:43
#9 0x000000002397001c in Poco::Net::TCPServerDispatcher::run (this=0x7ffe5a8c0f00) at /ssd/ClickHouse/base/poco/Net/src/TCPServerDispatcher.cpp:115
#10 0x0000000023b5ac14 in Poco::PooledThread::run (this=0x7ffff71cab80) at /ssd/ClickHouse/base/poco/Foundation/src/ThreadPool.cpp:188
#11 0x0000000023b579ba in Poco::(anonymous namespace)::RunnableHolder::run (this=0x7ffff70019c0) at /ssd/ClickHouse/base/poco/Foundation/src/Thread.cpp:45
#12 0x0000000023b566be in Poco::ThreadImpl::runnableEntry (pThread=0x7ffff71cabb8) at /ssd/ClickHouse/base/poco/Foundation/src/Thread_POSIX.cpp:335
#13 0x00007ffff7c94ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#14 0x00007ffff7d26a40 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
+ + +

Related reading

+ +
+ + + + + + + +
+ + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CNAME b/CNAME new file mode 100644 index 0000000000..ed9398b4f4 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +shakudada.xyz \ No newline at end of file diff --git a/archives/2019/09/index.html b/archives/2019/09/index.html new file mode 100644 index 0000000000..325077febc --- /dev/null +++ b/archives/2019/09/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/09/page/2/index.html b/archives/2019/09/page/2/index.html new file mode 100644 index 0000000000..c1297fa2e8 --- /dev/null +++ b/archives/2019/09/page/2/index.html @@ -0,0 +1,400 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/10/index.html b/archives/2019/10/index.html new file mode 100644 index 0000000000..3623c1f079 --- /dev/null +++ b/archives/2019/10/index.html @@ -0,0 +1,517 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/11/index.html b/archives/2019/11/index.html new file mode 100644 index 0000000000..52494e1091 --- /dev/null +++ b/archives/2019/11/index.html @@ -0,0 +1,537 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/12/index.html b/archives/2019/12/index.html new file mode 100644 index 0000000000..762f89c67d --- /dev/null +++ b/archives/2019/12/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/12/page/2/index.html b/archives/2019/12/page/2/index.html new file mode 100644 index 0000000000..d5c8e954c5 --- /dev/null +++ b/archives/2019/12/page/2/index.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/index.html b/archives/2019/index.html new file mode 100644 index 0000000000..6e96b1804b --- /dev/null +++ b/archives/2019/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/page/2/index.html b/archives/2019/page/2/index.html new file mode 100644 index 0000000000..53d5d4857f --- /dev/null +++ b/archives/2019/page/2/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/page/3/index.html b/archives/2019/page/3/index.html new file mode 100644 index 0000000000..c88dfa0266 --- /dev/null +++ b/archives/2019/page/3/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2019/page/4/index.html b/archives/2019/page/4/index.html new file mode 100644 index 0000000000..2227e689ba --- /dev/null +++ b/archives/2019/page/4/index.html @@ -0,0 +1,540 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2019 +
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/01/index.html b/archives/2020/01/index.html new file mode 100644 index 0000000000..040bbf96c6 --- /dev/null +++ b/archives/2020/01/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/02/index.html b/archives/2020/02/index.html new file mode 100644 index 0000000000..5552565f0e --- /dev/null +++ b/archives/2020/02/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/03/index.html b/archives/2020/03/index.html new file mode 100644 index 0000000000..bc750feccc --- /dev/null +++ b/archives/2020/03/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/04/index.html b/archives/2020/04/index.html new file mode 100644 index 0000000000..810ce9687b --- /dev/null +++ b/archives/2020/04/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/05/index.html b/archives/2020/05/index.html new file mode 100644 index 0000000000..ea0583c97c --- /dev/null +++ b/archives/2020/05/index.html @@ -0,0 +1,497 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/06/index.html b/archives/2020/06/index.html new file mode 100644 index 0000000000..84016ea439 --- /dev/null +++ b/archives/2020/06/index.html @@ -0,0 +1,517 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/07/index.html b/archives/2020/07/index.html new file mode 100644 index 0000000000..3e0541e8a3 --- /dev/null +++ b/archives/2020/07/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/08/index.html b/archives/2020/08/index.html new file mode 100644 index 0000000000..cfd92fe73b --- /dev/null +++ b/archives/2020/08/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/09/index.html b/archives/2020/09/index.html new file mode 100644 index 0000000000..72f8a5e9c0 --- /dev/null +++ b/archives/2020/09/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/10/index.html b/archives/2020/10/index.html new file mode 100644 index 0000000000..0357b857a0 --- /dev/null +++ b/archives/2020/10/index.html @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/11/index.html b/archives/2020/11/index.html new file mode 100644 index 0000000000..51cea09173 --- /dev/null +++ b/archives/2020/11/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/12/index.html b/archives/2020/12/index.html new file mode 100644 index 0000000000..68b7c9dd3c --- /dev/null +++ b/archives/2020/12/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/index.html b/archives/2020/index.html new file mode 100644 index 0000000000..2598d7a41a --- /dev/null +++ b/archives/2020/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/page/2/index.html b/archives/2020/page/2/index.html new file mode 100644 index 0000000000..51a41ade29 --- /dev/null +++ b/archives/2020/page/2/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/page/3/index.html b/archives/2020/page/3/index.html new file mode 100644 index 0000000000..a0ffe7afbc --- /dev/null +++ b/archives/2020/page/3/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/page/4/index.html b/archives/2020/page/4/index.html new file mode 100644 index 0000000000..12ba48a4ff --- /dev/null +++ b/archives/2020/page/4/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2020/page/5/index.html b/archives/2020/page/5/index.html new file mode 100644 index 0000000000..07a1cbf9b8 --- /dev/null +++ b/archives/2020/page/5/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2020 +
+ + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/01/index.html b/archives/2021/01/index.html new file mode 100644 index 0000000000..6f02695ddb --- /dev/null +++ b/archives/2021/01/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/02/index.html b/archives/2021/02/index.html new file mode 100644 index 0000000000..6586945f5c --- /dev/null +++ b/archives/2021/02/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/03/index.html b/archives/2021/03/index.html new file mode 100644 index 0000000000..887ee4d36c --- /dev/null +++ b/archives/2021/03/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/03/page/2/index.html b/archives/2021/03/page/2/index.html new file mode 100644 index 0000000000..415513b564 --- /dev/null +++ b/archives/2021/03/page/2/index.html @@ -0,0 +1,420 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/04/index.html b/archives/2021/04/index.html new file mode 100644 index 0000000000..e296a41a83 --- /dev/null +++ b/archives/2021/04/index.html @@ -0,0 +1,497 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/05/index.html b/archives/2021/05/index.html new file mode 100644 index 0000000000..521a8a6c1c --- /dev/null +++ b/archives/2021/05/index.html @@ -0,0 +1,477 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/06/index.html b/archives/2021/06/index.html new file mode 100644 index 0000000000..6410d37864 --- /dev/null +++ b/archives/2021/06/index.html @@ -0,0 +1,417 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/07/index.html b/archives/2021/07/index.html new file mode 100644 index 0000000000..37a3a890a1 --- /dev/null +++ b/archives/2021/07/index.html @@ -0,0 +1,417 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/08/index.html b/archives/2021/08/index.html new file mode 100644 index 0000000000..6ea7204927 --- /dev/null +++ b/archives/2021/08/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/09/index.html b/archives/2021/09/index.html new file mode 100644 index 0000000000..2be9f15875 --- /dev/null +++ b/archives/2021/09/index.html @@ -0,0 +1,417 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/10/index.html b/archives/2021/10/index.html new file mode 100644 index 0000000000..3f55a499a6 --- /dev/null +++ b/archives/2021/10/index.html @@ -0,0 +1,457 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/11/index.html b/archives/2021/11/index.html new file mode 100644 index 0000000000..891efaba5a --- /dev/null +++ b/archives/2021/11/index.html @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/12/index.html b/archives/2021/12/index.html new file mode 100644 index 0000000000..6622e9dfa5 --- /dev/null +++ b/archives/2021/12/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/index.html b/archives/2021/index.html new file mode 100644 index 0000000000..af13877d09 --- /dev/null +++ b/archives/2021/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/page/2/index.html b/archives/2021/page/2/index.html new file mode 100644 index 0000000000..66c80ddfac --- /dev/null +++ b/archives/2021/page/2/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/page/3/index.html b/archives/2021/page/3/index.html new file mode 100644 index 0000000000..0e02b44ca4 --- /dev/null +++ b/archives/2021/page/3/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/page/4/index.html b/archives/2021/page/4/index.html new file mode 100644 index 0000000000..d7399a7177 --- /dev/null +++ b/archives/2021/page/4/index.html @@ -0,0 +1,580 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2021/page/5/index.html b/archives/2021/page/5/index.html new file mode 100644 index 0000000000..d5926821f7 --- /dev/null +++ b/archives/2021/page/5/index.html @@ -0,0 +1,440 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2021 +
+ + + + + + + + +
+
+ + + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2022/01/index.html b/archives/2022/01/index.html new file mode 100644 index 0000000000..2fba3f4980 --- /dev/null +++ b/archives/2022/01/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2022 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2022/02/index.html b/archives/2022/02/index.html new file mode 100644 index 0000000000..fc06dc9c1c --- /dev/null +++ b/archives/2022/02/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
+ + 太棒了! 目前共计 255 篇日志。 继续努力。 +
+ + +
+ 2022 +
+ + + + +
+
+ + + + + + + + +
+ + + + +
+ + + + + + + + +
+
+ +
+ +
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/archives/2022/03/index.html b/archives/2022/03/index.html new file mode 100644 index 0000000000..77e7379b40 --- /dev/null +++ b/archives/2022/03/index.html @@ -0,0 +1,417 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 归档 | dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + + +
+
+
(The archive-page hunks of this commit add Hexo-generated 归档 (Archives) pages. Every page renders the same NexT theme template text — the page title 归档 | dinosaur ("Archives | dinosaur"), the counter line 太棒了! 目前共计 255 篇日志。 继续努力。 ("Awesome! 255 posts in total so far. Keep it up."), a year heading, and pagination around that page's post list. Only the list of added files is reproduced here:)

archives/2022/04/index.html | 477 lines | index 0000000000..e01138ba32
archives/2022/05/index.html | 457 lines | index 0000000000..dfab14f8b3
archives/2022/06/index.html | 457 lines | index 0000000000..6f606d1177
archives/2022/07/index.html | 397 lines | index 0000000000..8c932c4077
archives/2022/08/index.html | 457 lines | index 0000000000..176624fc60
archives/2022/09/index.html | 477 lines | index 0000000000..02a72bf296
archives/2022/10/index.html | 397 lines | index 0000000000..ce1b11308b
archives/2022/11/index.html | 537 lines | index 0000000000..9b6890aee9
archives/2022/12/index.html | 580 lines | index 0000000000..54b821b109
archives/2022/12/page/2/index.html | 580 lines | index 0000000000..7f58e50304
archives/2022/12/page/3/index.html | 520 lines | index 0000000000..961f2907db
archives/2022/index.html | 580 lines | index 0000000000..57f665d0e5
archives/2022/page/2/index.html | 580 lines | index 0000000000..d764fdea33
archives/2022/page/3/index.html | 580 lines | index 0000000000..b1fc900ef2
archives/2022/page/4/index.html | 580 lines | index 0000000000..f373443cc7
archives/2022/page/5/index.html | 580 lines | index 0000000000..f5fa22f276
archives/2022/page/6/index.html | 580 lines | index 0000000000..01667be176
archives/2022/page/7/index.html | 440 lines | index 0000000000..354570c84f
archives/2023/01/index.html | 477 lines | index 0000000000..80d72eb2ed
archives/2023/02/index.html | 477 lines | index 0000000000..2092aca876
archives/2023/03/index.html | 437 lines | index 0000000000..df82ee9480
archives/2023/04/index.html | 517 lines | index 0000000000..d8458cf510
archives/2023/05/index.html | 497 lines | index 0000000000..c9033c41e8
archives/2023/06/index.html | 557 lines | index 0000000000..e9b5271b78
archives/2023/07/index.html | 557 lines | index 0000000000..1601511bdd
archives/2023/08/index.html | 557 lines | index 0000000000..bc153b0174
archives/2023/09/index.html | 497 lines | index 0000000000..e6209f93d6
archives/2023/10/index.html | 557 lines | index 0000000000..2763c15a7f
archives/2023/index.html | 580 lines | index 0000000000..3e0dc60a48
archives/2023/page/2/index.html | 580 lines | index 0000000000..b0d709aeba
archives/2023/page/3/index.html | 580 lines | index 0000000000..8b5231b76e
archives/2023/page/4/index.html | 580 lines | index 0000000000..bb668cb5ec
archives/2023/page/5/index.html | 580 lines | index 0000000000..378048c436
archives/2023/page/6/index.html | 580 lines | index 0000000000..7126176e56
archives/2023/page/7/index.html | 540 lines | index 0000000000..149cc1505e
archives/index.html | 580 lines | index 0000000000..a1477bbc3f
archives/page/10/index.html | 580 lines | index 0000000000..8d9f4c2fe1
archives/page/11/index.html | 580 lines | index 0000000000..4d1cd4cc2c
archives/page/12/index.html | 580 lines | index 0000000000..b9f1b4790e
archives/page/13/index.html | 580 lines | index 0000000000..da171b2afa
archives/page/14/index.html | 583 lines | index 0000000000..f788487e4b
archives/page/15/index.html | 580 lines | index 0000000000..7c4600ba82
archives/page/16/index.html | 580 lines | index 0000000000..714dbe593b
archives/page/17/index.html | 580 lines | index 0000000000..618ad7f595
archives/page/18/index.html | 583 lines | index 0000000000..6fe45b1975
archives/page/19/index.html | 580 lines | index 0000000000..7ce3120cec
archives/page/2/index.html | 580 lines | index 0000000000..75904b33e9
archives/page/20/index.html | 580 lines | index 0000000000..2c41f462a7
archives/page/21/index.html | 580 lines | index 0000000000..66350276e3
archives/page/22/index.html | 583 lines | index 0000000000..450be49788
archives/page/23/index.html | 580 lines | index 0000000000..9603719fc3
archives/page/24/index.html | 580 lines | index 0000000000..eb2f9271a3
archives/page/25/index.html | 580 lines | index 0000000000..2c76e05bf2
archives/page/26/index.html | 480 lines | index 0000000000..ffd1a5151d
archives/page/3/index.html | 580 lines | index 0000000000..63e86ec0ca
archives/page/4/index.html | 580 lines | index 0000000000..9272de0845
archives/page/5/index.html | 580 lines | index 0000000000..6df330c796
archives/page/6/index.html | 580 lines | index 0000000000..794f14b58a
archives/page/7/index.html | 583 lines | index 0000000000..73c1a403e1
archives/page/8/index.html | 580 lines | index 0000000000..25407cd8bf
archives/page/9/index.html | 580 lines | index 0000000000..70190dde93
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/baidusitemap.xml b/baidusitemap.xml new file mode 100644 index 0000000000..0ba9214089 --- /dev/null +++ b/baidusitemap.xml @@ -0,0 +1,1023 @@ + + + + https://shakudada.xyz/2023/10/27/clickhouse-%E7%89%A9%E5%8C%96%E8%A7%86%E5%9B%BE/ + 2023-10-27 + + + https://shakudada.xyz/2023/08/15/java-juc/ + 2023-10-27 + + + https://shakudada.xyz/2021/03/17/%E8%AE%BA%E6%96%87%E7%9A%84%E6%9C%89%E8%B6%A3%E6%80%A7/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/31/%E8%B7%B3%E8%A1%A8/ + 2023-10-25 + + + https://shakudada.xyz/2021/05/24/%E9%9C%8D%E5%B0%94%E9%80%BB%E8%BE%91-%E4%BB%8E%E5%BF%AB%E6%8E%92%E5%BC%80%E5%A7%8B/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/18/%E8%8C%83%E7%95%B4%E5%92%8C%E7%B1%B3%E7%94%B0%E5%BC%95%E7%90%86/ + 2023-10-25 + + + https://shakudada.xyz/2020/03/15/%E8%A7%84%E5%88%99%E7%B3%BB%E7%BB%9F/ + 2023-10-25 + + + https://shakudada.xyz/2021/11/30/%E7%A7%9F%E7%BA%A6/ + 2023-10-25 + + + https://shakudada.xyz/2020/11/27/%E7%BA%A6%E6%9D%9F%E5%92%8C%E7%BB%93%E6%9E%84/ + 2023-10-25 + + + https://shakudada.xyz/2020/02/20/%E7%BC%96%E8%AF%91%E5%8E%9F%E7%90%86/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/16/%E7%BC%96%E8%AF%91sping-boot/ + 2023-10-25 + + + https://shakudada.xyz/2022/03/30/%E8%8C%83%E5%9E%8B%E6%A3%80%E6%9F%A5/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/30/%E6%AD%A3%E5%88%99%E6%A8%A1%E5%BC%8F/ + 2023-10-25 + + + https://shakudada.xyz/2020/12/07/%E6%B3%9B%E5%9E%8B/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/21/%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E6%98%AF%E4%BB%80%E4%B9%88/ + 2023-10-25 + + + https://shakudada.xyz/2020/08/11/%E6%9C%80%E5%A4%A7%E7%86%B5/ + 2023-10-25 + + + https://shakudada.xyz/2021/10/10/%E6%97%B6%E9%97%B4%E8%BD%AE%E7%AE%97%E6%B3%95/ + 2023-10-25 + + + https://shakudada.xyz/2020/12/25/%E5%BD%A2%E5%BC%8F%E5%8C%96%E8%AF%AD%E4%B9%89/ + 2023-10-25 + + + https://shakudada.xyz/2020/09/27/%E5%BE%AA%E7%8E%AF%E4%B8%8D%E5%8F%98%E5%BC%8Floop-invariants/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/15/%E5%BF%83%E8%B7%B3%E5%92%8Ctcp/ + 2023-10-25 + + + https://shakudada.xyz/2020/10/19/%E6%84%9F%E7%9F%A5%E6%9C%BA/ + 2023-10-25 + + + https://shakudada.xyz/2020/09/01/%E6%88%91%E7%9A%84es%E4%B9%8B%E8%B7%AF/ + 2023-10-25 + + + https://shakudada.xyz/2020/11/12/%E6%95%B0%E7%90%86%E9%80%BB%E8%BE%91/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/05/%E5%A6%82%E4%BD%95%E5%86%99%E4%B8%80%E4%B8%AA%E6%AD%A3%E7%A1%AE%E7%9A%84%E4%BB%A3%E7%A0%81/ + 2023-10-25 + + + https://shakudada.xyz/2021/05/24/%E5%A0%86%E6%A0%88/ + 2023-10-25 + + + https://shakudada.xyz/2020/04/27/%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%88%B0%E4%BB%A3%E7%A0%81/ + 2023-10-25 + + + https://shakudada.xyz/2020/07/13/%E5%BB%B6%E8%BF%9F%E6%B1%82%E5%80%BC/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/01/%E4%BE%9D%E8%B5%96%E5%92%8C%E5%86%B2%E7%AA%81/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/18/%E5%8F%8C%E5%90%91%E7%BB%91%E5%AE%9A/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/09/%E5%8D%8F%E7%A8%8B%E5%88%87%E6%8D%A2/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/11/%E5%8F%AF%E6%89%A9%E5%B1%95%E6%80%A7/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/09/zookeeper-connetion-loss/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/06/%E4%B8%80%E4%B8%AAsql%E7%9A%84%E7%BB%84%E6%88%90/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/22/%E4%B8%80%E6%AC%A1tcp%E9%94%99%E8%AF%AF%E6%8E%92%E6%9F%A5/ + 2023-10-25 + + + https://shakudada.xyz/2022/05/20/%E4%BD%BF%E7%94%A8gtest/ + 2023-10-25 + + + 
https://shakudada.xyz/2023/01/15/%E4%BD%BF%E7%94%A8k8s-%E6%90%AD%E5%BB%BAredis-%E9%9B%86%E7%BE%A4/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/20/volatile-java-%E5%AE%9E%E7%8E%B0/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/09/why-bison-can-be-find-in-cmake/ + 2023-10-25 + + + https://shakudada.xyz/2021/05/21/vxlan/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/21/xid-equal-to-close-xid/ + 2023-10-25 + + + https://shakudada.xyz/2023/02/28/utf8-encoding-and-java/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/30/tomcat-%E7%BC%96%E8%AF%91/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/26/thread-pool/ + 2023-10-25 + + + https://shakudada.xyz/2021/01/06/three-value-prediate/ + 2023-10-25 + + + https://shakudada.xyz/2020/10/28/todolist/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/27/tired/ + 2023-10-25 + + + https://shakudada.xyz/2021/04/01/tcp%E5%8D%8F%E8%AE%AE/ + 2023-10-25 + + + https://shakudada.xyz/2023/03/23/tersorflow-%E5%85%A5%E9%97%A8/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/04/stop/ + 2023-10-25 + + + https://shakudada.xyz/2021/07/15/tcp-nodelay/ + 2023-10-25 + + + https://shakudada.xyz/2020/12/17/tcp/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/14/tcpdump-resp/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/16/spring-boot-repackage-%E5%92%8C%E5%85%A5%E5%8F%A3/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/15/spring-boot-%E5%9F%BA%E7%A1%80/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/07/spring-boot/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/29/springboot-%E8%AF%B7%E6%B1%82%E6%B5%81%E7%A8%8B/ + 2023-10-25 + + + https://shakudada.xyz/2020/07/21/sql-join/ + 2023-10-25 + + + https://shakudada.xyz/2020/03/16/ssa-optimistic/ + 2023-10-25 + + + https://shakudada.xyz/2021/06/22/skiplist/ + 2023-10-25 + + + https://shakudada.xyz/2021/12/30/roaring-bitmap/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/20/simpleDatetimeformatter-vs-datetimeformatter/ + 2023-10-25 + + + https://shakudada.xyz/2021/08/09/redis-%E4%B8%BB%E4%BB%8E%E5%88%87%E6%8D%A2%E5%92%8C%E9%AB%98%E5%8F%AF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/11/raft/ + 2023-10-25 + + + https://shakudada.xyz/2021/09/25/redis/ + 2023-10-25 + + + https://shakudada.xyz/2021/09/26/redis-cluster/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/29/redission-%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B/ + 2023-10-25 + + + https://shakudada.xyz/2021/10/15/rabbitmq-ack-reject/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/22/rabbitmq-spring-boot/ + 2023-10-25 + + + https://shakudada.xyz/2021/11/04/rabbitmq%E5%BF%83%E8%B7%B3%E9%97%AE%E9%A2%98%E5%92%8Cphp/ + 2023-10-25 + + + https://shakudada.xyz/2021/11/09/rabbit%E6%B5%81%E7%A8%8B/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/27/php-tokenlizer/ + 2023-10-25 + + + https://shakudada.xyz/2020/01/07/php-%E5%8F%8D%E5%B0%84/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/19/php-try-catch/ + 2023-10-25 + + + https://shakudada.xyz/2020/12/21/pushdown/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/11/php-cgi-windows-curl/ + 2023-10-25 + + + https://shakudada.xyz/2021/01/06/pdf-format/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/12/php-imply-cast/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/08/php-opcode-to-handler/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/12/php-pdo-%E7%9B%B8%E5%85%B3%E5%8F%82%E6%95%B0/ + 2023-10-25 + + + https://shakudada.xyz/2022/08/16/paper/ + 2023-10-25 + + + https://shakudada.xyz/2023/09/19/nginx-temp-proxy-%E6%9D%83%E9%99%90%E5%AF%BC%E8%87%B4%E6%8A%A5%E9%94%99/ + 2023-10-25 + + + 
https://shakudada.xyz/2020/10/21/parser/ + 2023-10-25 + + + https://shakudada.xyz/2021/01/28/mysql%E7%9A%84select/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/14/nacos-client-and-serve/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/08/namespace%E4%B8%8Edocker/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/22/nacos-%E8%8E%B7%E5%8F%96%E9%85%8D%E7%BD%AE/ + 2023-10-25 + + + https://shakudada.xyz/2021/04/14/mysql%E4%B8%BB%E4%BB%8E/ + 2023-10-25 + + + https://shakudada.xyz/2021/04/28/mysql%E6%8F%A1%E6%89%8B/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/27/mysql-%E4%B8%BB%E4%BB%8E%E5%A4%8D%E5%88%B6/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/11/mysqlbinlog/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/05/mysql-%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/12/mysql%E4%B8%A5%E6%A0%BC%E6%A8%A1%E5%BC%8F/ + 2023-10-25 + + + https://shakudada.xyz/2021/05/11/mysql-binlog%E8%8E%B7%E5%8F%96/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/23/mysql-error-sqlstate/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/08/mysql-string-max-length/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/28/mysql-explain-impossible-condition/ + 2023-10-25 + + + https://shakudada.xyz/2020/11/12/mvcc/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/15/mybatisplus-Column-status-cannot-be-null/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/12/mybatis-dollor-and-sharp/ + 2023-10-25 + + + https://shakudada.xyz/2021/07/29/mysql-5-7-in-%E7%9A%84%E4%BC%98%E5%8C%96%E5%BC%95%E8%B5%B7%E7%9A%84bug/ + 2023-10-25 + + + https://shakudada.xyz/2021/02/18/mvcc-translate/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/12/mockito-%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2021/06/04/mongoinsert/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/04/micro-k8s-%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/19/memory-model/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/27/milvus-%E7%BC%96%E8%AF%91%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2020/03/10/math/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/27/maven-scope/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/28/maven%E6%89%93%E5%8C%85NoClassDefFoundError-on-Maven-dependency/ + 2023-10-25 + + + https://shakudada.xyz/2022/09/05/max-min-heap/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/04/lucene-%E5%88%86%E8%AF%8D/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/19/lucene-%E6%90%9C%E7%B4%A2%E8%BF%87%E7%A8%8B/ + 2023-10-25 + + + https://shakudada.xyz/2022/06/19/lucene-%E7%BC%96%E8%AF%91%E5%AE%89%E8%A3%85/ + 2023-10-25 + + + https://shakudada.xyz/2021/10/21/lucence%E6%BA%90%E7%A0%81%E5%88%86%E6%9E%90/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/19/lsmtree/ + 2023-10-25 + + + https://shakudada.xyz/2022/06/27/lucene-10%E6%BA%90%E7%A0%81%E5%88%86%E6%9E%90/ + 2023-10-25 + + + https://shakudada.xyz/2022/08/19/lucene-tim%E6%A0%BC%E5%BC%8F/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/18/lex%E5%92%8Cyacc%E4%BE%8B%E5%AD%90/ + 2023-10-25 + + + https://shakudada.xyz/2021/10/18/llvm/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/05/llvm-ir-%E4%BE%8B%E5%AD%90/ + 2023-10-25 + + + https://shakudada.xyz/2022/03/27/lr-parser/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/13/kmp-correct/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/02/learn-es-invert-index/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/14/kafka%E7%BC%96%E8%AF%91%E5%92%8C%E5%90%AF%E5%8A%A8-1/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/12/js-define%E5%87%BD%E6%95%B0/ + 2023-10-25 + 
+ + https://shakudada.xyz/2019/12/12/js-vue%E5%9F%BA%E7%A1%80/ + 2023-10-25 + + + https://shakudada.xyz/2022/04/12/jvm%E7%BA%BF%E7%A8%8B%E5%AE%9E%E7%8E%B0/ + 2023-10-25 + + + https://shakudada.xyz/2023/02/13/jstak/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/16/jdk-%E5%8F%8D%E6%B1%87%E7%BC%96/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/10/jdbc-Communications-link-failure/ + 2023-10-25 + + + https://shakudada.xyz/2022/02/18/jdk-%E7%BC%96%E8%AF%91/ + 2023-10-25 + + + https://shakudada.xyz/2022/09/20/jdk%E7%BC%96%E8%AF%91/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/26/java%E5%9F%BA%E7%A1%80/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/14/java%E7%9A%84package%E4%B8%8E%E6%96%87%E4%BB%B6%E8%B7%AF%E5%BE%84/ + 2023-10-25 + + + https://shakudada.xyz/2022/04/12/java%E7%B1%BB%E5%88%9D%E5%A7%8B%E5%8C%96/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/27/java-%E7%BA%BF%E7%A8%8B%E6%B1%A0/ + 2023-10-25 + + + https://shakudada.xyz/2022/04/12/java%E5%92%8Cspringboot/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/29/javac/ + 2023-10-25 + + + https://shakudada.xyz/2023/02/14/java-%E5%B8%B8%E7%94%A8%E5%91%BD%E4%BB%A4/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/24/java-%E6%95%B0%E7%BB%84%E5%A3%B0%E6%98%8E%E4%BD%8D%E7%BD%AE%E5%8C%BA%E5%88%AB/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/26/java-%E5%BC%82%E5%B8%B8/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/01/java-%E6%96%B9%E6%B3%95%E7%AD%BE%E5%90%8D/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/03/java-%E5%9F%BA%E6%9C%AC%E7%B1%BB%E5%9E%8B/ + 2023-10-25 + + + https://shakudada.xyz/2022/09/21/java-%E5%A0%86%E6%A0%88/ + 2023-10-25 + + + https://shakudada.xyz/2023/01/11/java-%E5%AF%B9%E8%B1%A1%E5%A4%A7%E5%B0%8F/ + 2023-10-25 + + + https://shakudada.xyz/2023/01/11/java-%E4%B8%80%E6%AC%A1gc%E6%8E%92%E6%9F%A5/ + 2023-10-25 + + + https://shakudada.xyz/2023/09/21/java-%E4%B8%9A%E5%8A%A1oom%E6%8E%92%E6%9F%A5/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/30/java-%E4%BD%BF%E7%94%A8lua-script/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/21/java-%E5%8A%A8%E6%80%81%E4%BB%A3%E7%90%86/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/06/java-volalite/ + 2023-10-25 + + + https://shakudada.xyz/2023/03/29/java-unsafe/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/25/java-wait-notify/ + 2023-10-25 + + + https://shakudada.xyz/2022/09/11/java-thread-local-%E5%88%9D%E5%A7%8B%E5%8C%96%E6%97%B6%E6%9C%BA/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/16/java-thread-pool/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/25/java-unbox/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/27/java-sort-default-order/ + 2023-10-25 + + + https://shakudada.xyz/2022/08/23/java-static-%E5%9D%97/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/25/java-string-%E7%9B%B8%E5%85%B3%E5%86%85%E5%AE%B9/ + 2023-10-25 + + + https://shakudada.xyz/2023/09/22/java-oom-hprof%E6%96%87%E4%BB%B6%E7%94%9F%E6%88%90%E6%97%B6%E6%9C%BA/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/20/java-parser/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/14/java-rabbitmq-%E5%88%9D%E5%A7%8B%E5%8C%96/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/07/java-redis-client/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/18/java-mybatis-plus-date-handler/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/24/java-main/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/04/java-nio/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/15/java-int-overflow-%E6%8E%A2%E7%A9%B6/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/18/java-integer-divison/ + 2023-10-25 + + + 
https://shakudada.xyz/2023/09/06/java-jdk-%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%80%E4%B8%AA%E7%BA%BF%E7%A8%8B%E7%A9%BA%E6%8C%87%E9%92%88%E4%B8%8D%E9%80%80%E5%87%BA/ + 2023-10-25 + + + https://shakudada.xyz/2022/11/30/java-assert/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/30/java-generic/ + 2023-10-25 + + + https://shakudada.xyz/2023/09/06/java-branch-bytecode/ + 2023-10-25 + + + https://shakudada.xyz/2020/09/25/invariants/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/04/insert-ignore-%E6%AD%BB%E9%94%81/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/12/java-Class-forName/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/30/ipc/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/22/java-arraycopy/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/02/ik%E5%88%86%E8%AF%8D/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/13/https-tls-ssl/ + 2023-10-25 + + + https://shakudada.xyz/2020/03/04/induction/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/13/hello-world/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/10/hidden-and-shadow-in-java/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/21/how-to-debug-javac/ + 2023-10-25 + + + https://shakudada.xyz/2022/09/06/httpServerletRequest-autowired-%E5%8E%9F%E5%9B%A0/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/19/gradle-%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/21/hello-world-java/ + 2023-10-25 + + + https://shakudada.xyz/2020/07/10/group-concat%E7%9C%8Bmysql%E5%87%BD%E6%95%B0/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/12/golang-lock/ + 2023-10-25 + + + https://shakudada.xyz/2020/04/10/golang-stack/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/18/golang-interface-%E6%AF%94%E8%BE%83/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/01/functor-1/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/17/go-micro-hello-world/ + 2023-10-25 + + + https://shakudada.xyz/2020/05/28/functor/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/25/fst/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/25/fst-%E7%BB%93%E6%9E%84/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/04/found-duplicate-key-xxx-spring-boot/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/16/flink-%E7%BC%96%E8%AF%91/ + 2023-10-25 + + + https://shakudada.xyz/2023/09/10/elastic-search-%E7%BC%96%E8%AF%91%E5%92%8C%E8%B0%83%E8%AF%95/ + 2023-10-25 + + + https://shakudada.xyz/2022/04/02/dubbo-rpc/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/27/docker%E6%8C%81%E4%B9%85%E5%8C%96/ + 2023-10-25 + + + https://shakudada.xyz/2023/03/09/each-jvm-bytecode-implement-in-x86-with-asm/ + 2023-10-25 + + + https://shakudada.xyz/2021/05/11/c%E5%AD%97%E8%8A%82%E5%AF%B9%E9%BD%90/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/09/direct-memory-in-java/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/10/docker-compose-spec/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/26/docker%E4%B8%8Eiptable/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/12/curry/ + 2023-10-25 + + + https://shakudada.xyz/2022/01/04/croaring-bitmap/ + 2023-10-25 + + + https://shakudada.xyz/2020/09/18/crf/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/19/cors-%E7%9B%B8%E5%85%B3/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/10/create-a-maven-plugin/ + 2023-10-25 + + + https://shakudada.xyz/2022/10/24/cpp-flag/ + 2023-10-25 + + + https://shakudada.xyz/2020/04/15/coding/ + 2023-10-25 + + + https://shakudada.xyz/2019/09/15/compile-and-debug-linux-kernel/ + 2023-10-25 + + + https://shakudada.xyz/2019/11/21/composer-ext/ + 2023-10-25 + + + 
https://shakudada.xyz/2019/10/22/composer%E7%9A%84psr4/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/16/clickhouse-%E7%89%A9%E5%8C%96%E8%A7%86%E5%9B%BE%E5%92%8C%E4%BD%8D%E5%9B%BE/ + 2023-10-25 + + + https://shakudada.xyz/2022/05/22/clickhouse-%E8%AF%B7%E6%B1%82%E7%9A%84%E7%94%9F%E5%91%BD%E5%91%A8%E6%9C%9F/ + 2023-10-25 + + + https://shakudada.xyz/2019/12/19/clickhouse-%E7%BC%96%E8%AF%91%E5%AE%89%E8%A3%85/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/26/cms-gc/ + 2023-10-25 + + + https://shakudada.xyz/2022/07/06/clickhouse-parser/ + 2023-10-25 + + + https://shakudada.xyz/2023/02/21/clickhouse-mybatis-batch-insert-cpu-raise-up/ + 2023-10-25 + + + https://shakudada.xyz/2022/05/15/clickhouse-%E5%BB%BA%E8%A1%A8/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/16/clickhouse-cloud-dbeaver%E8%BF%9E%E6%8E%A5%E4%B8%8D%E4%B8%8A/ + 2023-10-25 + + + https://shakudada.xyz/2022/06/01/clickhouse-function/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/14/clickhouse-jdbc-1002-error/ + 2023-10-25 + + + https://shakudada.xyz/2021/04/02/canal%E9%9C%80%E8%A6%81%E6%B3%A8%E6%84%8F%E7%9A%84%E7%82%B9/ + 2023-10-25 + + + https://shakudada.xyz/2022/05/13/clickhosue-insert-insert-deduplicate/ + 2023-10-25 + + + https://shakudada.xyz/2023/01/12/clickhouse-400-error/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/15/c-%E6%A0%87%E5%87%86%E5%BA%93%E7%9A%84vector/ + 2023-10-25 + + + https://shakudada.xyz/2022/06/26/c-%E5%9F%BA%E7%A1%80/ + 2023-10-25 + + + https://shakudada.xyz/2021/04/14/c99%E6%9F%94%E6%80%A7%E6%95%B0%E7%BB%84/ + 2023-10-25 + + + https://shakudada.xyz/2023/10/25/bm25-and-search/ + 2023-10-25 + + + https://shakudada.xyz/2020/11/18/btree/ + 2023-10-25 + + + https://shakudada.xyz/2020/07/07/c-auto-cast/ + 2023-10-25 + + + https://shakudada.xyz/2021/03/27/build-grpc/ + 2023-10-25 + + + https://shakudada.xyz/2023/05/18/bigint-%E6%98%A0%E5%B0%84/ + 2023-10-25 + + + https://shakudada.xyz/2022/04/19/bloom-filter/ + 2023-10-25 + + + https://shakudada.xyz/2020/06/29/basic-paxos/ + 2023-10-25 + + + https://shakudada.xyz/2023/04/12/arroyo-%E7%BC%96%E8%AF%91%E5%92%8C%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/16/antlr-%E4%BD%BF%E7%94%A8/ + 2023-10-25 + + + https://shakudada.xyz/2019/10/31/ast%E6%9E%84%E9%80%A0/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/16/WARNING-An-illegal-reflective-access-operation-has-occurred-groovy/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/05/WFST-%E5%92%8Clucene-%E5%92%8Cfst/ + 2023-10-25 + + + https://shakudada.xyz/2020/04/08/a-language-to-machine-code/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/06/Payload-value-must-not-be-empty/ + 2023-10-25 + + + https://shakudada.xyz/2021/01/13/RSA/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/16/System-arraycopy-in-java/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/03/Unable-to-make-protected-final-java-lang-Class-java-lang-ClassLoader-defineClass/ + 2023-10-25 + + + https://shakudada.xyz/2023/08/25/Hydration-completed-but-contains-mismatches/ + 2023-10-25 + + + https://shakudada.xyz/2023/06/07/Invalid-JSON-text-in-argument-2-in-mysql8/ + 2023-10-25 + + + https://shakudada.xyz/2023/07/04/Numeric-overflow-in-expression-idea-java/ + 2023-10-25 + + + https://shakudada.xyz/2023/01/30/Garbage-First-Garbage-Collection-%E7%AE%80%E5%8D%95%E6%A6%82%E5%86%B5/ + 2023-10-25 + + + https://shakudada.xyz/2022/12/16/ConcurrentHashMap-npe/ + 2023-10-25 + + + https://shakudada.xyz/2023/02/04/Double-Checked-Locking-is-Broken/ + 2023-10-25 + + + 
https://shakudada.xyz/2021/04/13/20210413%E5%8F%8D%E6%80%9D%E8%BF%87%E5%8E%BB/ + 2023-10-25 + + + https://shakudada.xyz/2022/08/26/3-Method-Reference-Expressions/ + 2023-10-25 + + \ No newline at end of file diff --git a/css/main.css b/css/main.css new file mode 100644 index 0000000000..5818508efe --- /dev/null +++ b/css/main.css @@ -0,0 +1,2420 @@ +:root { + --body-bg-color: #fff; + --content-bg-color: #fff; + --card-bg-color: #f5f5f5; + --text-color: #555; + --blockquote-color: #666; + --link-color: #555; + --link-hover-color: #222; + --brand-color: #fff; + --brand-hover-color: #fff; + --table-row-odd-bg-color: #f9f9f9; + --table-row-hover-bg-color: #f5f5f5; + --menu-item-bg-color: #f5f5f5; + --btn-default-bg: #222; + --btn-default-color: #fff; + --btn-default-border-color: #222; + --btn-default-hover-bg: #fff; + --btn-default-hover-color: #222; + --btn-default-hover-border-color: #222; +} +html { + line-height: 1.15; /* 1 */ + -webkit-text-size-adjust: 100%; /* 2 */ +} +body { + margin: 0; +} +main { + display: block; +} +h1 { + font-size: 2em; + margin: 0.67em 0; +} +hr { + box-sizing: content-box; /* 1 */ + height: 0; /* 1 */ + overflow: visible; /* 2 */ +} +pre { + font-family: monospace, monospace; /* 1 */ + font-size: 1em; /* 2 */ +} +a { + background: transparent; +} +abbr[title] { + border-bottom: none; /* 1 */ + text-decoration: underline; /* 2 */ + text-decoration: underline dotted; /* 2 */ +} +b, +strong { + font-weight: bolder; +} +code, +kbd, +samp { + font-family: monospace, monospace; /* 1 */ + font-size: 1em; /* 2 */ +} +small { + font-size: 80%; +} +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} +sub { + bottom: -0.25em; +} +sup { + top: -0.5em; +} +img { + border-style: none; +} +button, +input, +optgroup, +select, +textarea { + font-family: inherit; /* 1 */ + font-size: 100%; /* 1 */ + line-height: 1.15; /* 1 */ + margin: 0; /* 2 */ +} +button, +input { +/* 1 */ + overflow: visible; +} +button, +select { +/* 1 */ + text-transform: none; +} +button, +[type='button'], +[type='reset'], +[type='submit'] { + -webkit-appearance: button; +} +button::-moz-focus-inner, +[type='button']::-moz-focus-inner, +[type='reset']::-moz-focus-inner, +[type='submit']::-moz-focus-inner { + border-style: none; + padding: 0; +} +button:-moz-focusring, +[type='button']:-moz-focusring, +[type='reset']:-moz-focusring, +[type='submit']:-moz-focusring { + outline: 1px dotted ButtonText; +} +fieldset { + padding: 0.35em 0.75em 0.625em; +} +legend { + box-sizing: border-box; /* 1 */ + color: inherit; /* 2 */ + display: table; /* 1 */ + max-width: 100%; /* 1 */ + padding: 0; /* 3 */ + white-space: normal; /* 1 */ +} +progress { + vertical-align: baseline; +} +textarea { + overflow: auto; +} +[type='checkbox'], +[type='radio'] { + box-sizing: border-box; /* 1 */ + padding: 0; /* 2 */ +} +[type='number']::-webkit-inner-spin-button, +[type='number']::-webkit-outer-spin-button { + height: auto; +} +[type='search'] { + outline-offset: -2px; /* 2 */ + -webkit-appearance: textfield; /* 1 */ +} +[type='search']::-webkit-search-decoration { + -webkit-appearance: none; +} +::-webkit-file-upload-button { + font: inherit; /* 2 */ + -webkit-appearance: button; /* 1 */ +} +details { + display: block; +} +summary { + display: list-item; +} +template { + display: none; +} +[hidden] { + display: none; +} +::selection { + background: #262a30; + color: #eee; +} +html, +body { + height: 100%; +} +body { + background: var(--body-bg-color); + color: var(--text-color); 
+ font-family: 'Lato', "PingFang SC", "Microsoft YaHei", sans-serif; + font-size: 1em; + line-height: 2; +} +@media (max-width: 991px) { + body { + padding-left: 0 !important; + padding-right: 0 !important; + } +} +h1, +h2, +h3, +h4, +h5, +h6 { + font-family: 'Lato', "PingFang SC", "Microsoft YaHei", sans-serif; + font-weight: bold; + line-height: 1.5; + margin: 20px 0 15px; +} +h1 { + font-size: 1.5em; +} +h2 { + font-size: 1.375em; +} +h3 { + font-size: 1.25em; +} +h4 { + font-size: 1.125em; +} +h5 { + font-size: 1em; +} +h6 { + font-size: 0.875em; +} +p { + margin: 0 0 20px 0; +} +a, +span.exturl { + border-bottom: 1px solid #999; + color: var(--link-color); + outline: 0; + text-decoration: none; + overflow-wrap: break-word; + word-wrap: break-word; + cursor: pointer; +} +a:hover, +span.exturl:hover { + border-bottom-color: var(--link-hover-color); + color: var(--link-hover-color); +} +iframe, +img, +video { + display: block; + margin-left: auto; + margin-right: auto; + max-width: 100%; +} +hr { + background-image: repeating-linear-gradient(-45deg, #ddd, #ddd 4px, transparent 4px, transparent 8px); + border: 0; + height: 3px; + margin: 40px 0; +} +blockquote { + border-left: 4px solid #ddd; + color: var(--blockquote-color); + margin: 0; + padding: 0 15px; +} +blockquote cite::before { + content: '-'; + padding: 0 5px; +} +dt { + font-weight: bold; +} +dd { + margin: 0; + padding: 0; +} +kbd { + background-color: #f5f5f5; + background-image: linear-gradient(#eee, #fff, #eee); + border: 1px solid #ccc; + border-radius: 0.2em; + box-shadow: 0.1em 0.1em 0.2em rgba(0,0,0,0.1); + color: #555; + font-family: inherit; + padding: 0.1em 0.3em; + white-space: nowrap; +} +.table-container { + overflow: auto; +} +table { + border-collapse: collapse; + border-spacing: 0; + font-size: 0.875em; + margin: 0 0 20px 0; + width: 100%; +} +tbody tr:nth-of-type(odd) { + background: var(--table-row-odd-bg-color); +} +tbody tr:hover { + background: var(--table-row-hover-bg-color); +} +caption, +th, +td { + font-weight: normal; + padding: 8px; + vertical-align: middle; +} +th, +td { + border: 1px solid #ddd; + border-bottom: 3px solid #ddd; +} +th { + font-weight: 700; + padding-bottom: 10px; +} +td { + border-bottom-width: 1px; +} +.btn { + background: var(--btn-default-bg); + border: 2px solid var(--btn-default-border-color); + border-radius: 0; + color: var(--btn-default-color); + display: inline-block; + font-size: 0.875em; + line-height: 2; + padding: 0 20px; + text-decoration: none; + transition-property: background-color; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.btn:hover { + background: var(--btn-default-hover-bg); + border-color: var(--btn-default-hover-border-color); + color: var(--btn-default-hover-color); +} +.btn + .btn { + margin: 0 0 8px 8px; +} +.btn .fa-fw { + text-align: left; + width: 1.285714285714286em; +} +.toggle { + line-height: 0; +} +.toggle .toggle-line { + background: #fff; + display: inline-block; + height: 2px; + left: 0; + position: relative; + top: 0; + transition: all 0.4s; + vertical-align: top; + width: 100%; +} +.toggle .toggle-line:not(:first-child) { + margin-top: 3px; +} +.toggle.toggle-arrow .toggle-line-first { + left: 50%; + top: 2px; + transform: rotate(45deg); + width: 50%; +} +.toggle.toggle-arrow .toggle-line-middle { + left: 2px; + width: 90%; +} +.toggle.toggle-arrow .toggle-line-last { + left: 50%; + top: -2px; + transform: rotate(-45deg); + width: 50%; +} +.toggle.toggle-close .toggle-line-first { + 
transform: rotate(-45deg); + top: 5px; +} +.toggle.toggle-close .toggle-line-middle { + opacity: 0; +} +.toggle.toggle-close .toggle-line-last { + transform: rotate(45deg); + top: -5px; +} +.highlight, +pre { + background: #f7f7f7; + color: #4d4d4c; + line-height: 1.6; + margin: 0 auto 20px; +} +pre, +code { + font-family: consolas, Menlo, monospace, "PingFang SC", "Microsoft YaHei"; +} +code { + background: #eee; + border-radius: 3px; + color: #555; + padding: 2px 4px; + overflow-wrap: break-word; + word-wrap: break-word; +} +.highlight *::selection { + background: #d6d6d6; +} +.highlight pre { + border: 0; + margin: 0; + padding: 10px 0; +} +.highlight table { + border: 0; + margin: 0; + width: auto; +} +.highlight td { + border: 0; + padding: 0; +} +.highlight figcaption { + background: #eff2f3; + color: #4d4d4c; + display: flex; + font-size: 0.875em; + justify-content: space-between; + line-height: 1.2; + padding: 0.5em; +} +.highlight figcaption a { + color: #4d4d4c; +} +.highlight figcaption a:hover { + border-bottom-color: #4d4d4c; +} +.highlight .gutter { + -moz-user-select: none; + -ms-user-select: none; + -webkit-user-select: none; + user-select: none; +} +.highlight .gutter pre { + background: #eff2f3; + color: #869194; + padding-left: 10px; + padding-right: 10px; + text-align: right; +} +.highlight .code pre { + background: #f7f7f7; + padding-left: 10px; + width: 100%; +} +.gist table { + width: auto; +} +.gist table td { + border: 0; +} +pre { + overflow: auto; + padding: 10px; +} +pre code { + background: none; + color: #4d4d4c; + font-size: 0.875em; + padding: 0; + text-shadow: none; +} +pre .deletion { + background: #fdd; +} +pre .addition { + background: #dfd; +} +pre .meta { + color: #eab700; + -moz-user-select: none; + -ms-user-select: none; + -webkit-user-select: none; + user-select: none; +} +pre .comment { + color: #8e908c; +} +pre .variable, +pre .attribute, +pre .tag, +pre .name, +pre .regexp, +pre .ruby .constant, +pre .xml .tag .title, +pre .xml .pi, +pre .xml .doctype, +pre .html .doctype, +pre .css .id, +pre .css .class, +pre .css .pseudo { + color: #c82829; +} +pre .number, +pre .preprocessor, +pre .built_in, +pre .builtin-name, +pre .literal, +pre .params, +pre .constant, +pre .command { + color: #f5871f; +} +pre .ruby .class .title, +pre .css .rules .attribute, +pre .string, +pre .symbol, +pre .value, +pre .inheritance, +pre .header, +pre .ruby .symbol, +pre .xml .cdata, +pre .special, +pre .formula { + color: #718c00; +} +pre .title, +pre .css .hexcolor { + color: #3e999f; +} +pre .function, +pre .python .decorator, +pre .python .title, +pre .ruby .function .title, +pre .ruby .title .keyword, +pre .perl .sub, +pre .javascript .title, +pre .coffeescript .title { + color: #4271ae; +} +pre .keyword, +pre .javascript .function { + color: #8959a8; +} +.blockquote-center { + border-left: none; + margin: 40px 0; + padding: 0; + position: relative; + text-align: center; +} +.blockquote-center .fa { + display: block; + opacity: 0.6; + position: absolute; + width: 100%; +} +.blockquote-center .fa-quote-left { + border-top: 1px solid #ccc; + text-align: left; + top: -20px; +} +.blockquote-center .fa-quote-right { + border-bottom: 1px solid #ccc; + text-align: right; + bottom: -20px; +} +.blockquote-center p, +.blockquote-center div { + text-align: center; +} +.post-body .group-picture img { + margin: 0 auto; + padding: 0 3px; +} +.group-picture-row { + margin-bottom: 6px; + overflow: hidden; +} +.group-picture-column { + float: left; + margin-bottom: 10px; +} 
+.post-body .label { + color: #555; + display: inline; + padding: 0 2px; +} +.post-body .label.default { + background: #f0f0f0; +} +.post-body .label.primary { + background: #efe6f7; +} +.post-body .label.info { + background: #e5f2f8; +} +.post-body .label.success { + background: #e7f4e9; +} +.post-body .label.warning { + background: #fcf6e1; +} +.post-body .label.danger { + background: #fae8eb; +} +.post-body .tabs { + margin-bottom: 20px; +} +.post-body .tabs, +.tabs-comment { + display: block; + padding-top: 10px; + position: relative; +} +.post-body .tabs ul.nav-tabs, +.tabs-comment ul.nav-tabs { + display: flex; + flex-wrap: wrap; + margin: 0; + margin-bottom: -1px; + padding: 0; +} +@media (max-width: 413px) { + .post-body .tabs ul.nav-tabs, + .tabs-comment ul.nav-tabs { + display: block; + margin-bottom: 5px; + } +} +.post-body .tabs ul.nav-tabs li.tab, +.tabs-comment ul.nav-tabs li.tab { + border-bottom: 1px solid #ddd; + border-left: 1px solid transparent; + border-right: 1px solid transparent; + border-top: 3px solid transparent; + flex-grow: 1; + list-style-type: none; + border-radius: 0 0 0 0; +} +@media (max-width: 413px) { + .post-body .tabs ul.nav-tabs li.tab, + .tabs-comment ul.nav-tabs li.tab { + border-bottom: 1px solid transparent; + border-left: 3px solid transparent; + border-right: 1px solid transparent; + border-top: 1px solid transparent; + } +} +@media (max-width: 413px) { + .post-body .tabs ul.nav-tabs li.tab, + .tabs-comment ul.nav-tabs li.tab { + border-radius: 0; + } +} +.post-body .tabs ul.nav-tabs li.tab a, +.tabs-comment ul.nav-tabs li.tab a { + border-bottom: initial; + display: block; + line-height: 1.8; + outline: 0; + padding: 0.25em 0.75em; + text-align: center; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-out; +} +.post-body .tabs ul.nav-tabs li.tab a i, +.tabs-comment ul.nav-tabs li.tab a i { + width: 1.285714285714286em; +} +.post-body .tabs ul.nav-tabs li.tab.active, +.tabs-comment ul.nav-tabs li.tab.active { + border-bottom: 1px solid transparent; + border-left: 1px solid #ddd; + border-right: 1px solid #ddd; + border-top: 3px solid #fc6423; +} +@media (max-width: 413px) { + .post-body .tabs ul.nav-tabs li.tab.active, + .tabs-comment ul.nav-tabs li.tab.active { + border-bottom: 1px solid #ddd; + border-left: 3px solid #fc6423; + border-right: 1px solid #ddd; + border-top: 1px solid #ddd; + } +} +.post-body .tabs ul.nav-tabs li.tab.active a, +.tabs-comment ul.nav-tabs li.tab.active a { + color: var(--link-color); + cursor: default; +} +.post-body .tabs .tab-content .tab-pane, +.tabs-comment .tab-content .tab-pane { + border: 1px solid #ddd; + border-top: 0; + padding: 20px 20px 0 20px; + border-radius: 0; +} +.post-body .tabs .tab-content .tab-pane:not(.active), +.tabs-comment .tab-content .tab-pane:not(.active) { + display: none; +} +.post-body .tabs .tab-content .tab-pane.active, +.tabs-comment .tab-content .tab-pane.active { + display: block; +} +.post-body .tabs .tab-content .tab-pane.active:nth-of-type(1), +.tabs-comment .tab-content .tab-pane.active:nth-of-type(1) { + border-radius: 0 0 0 0; +} +@media (max-width: 413px) { + .post-body .tabs .tab-content .tab-pane.active:nth-of-type(1), + .tabs-comment .tab-content .tab-pane.active:nth-of-type(1) { + border-radius: 0; + } +} +.post-body .note { + border-radius: 3px; + margin-bottom: 20px; + padding: 1em; + position: relative; + border: 1px solid #eee; + border-left-width: 5px; +} +.post-body .note h2, +.post-body .note h3, +.post-body .note h4, 
+.post-body .note h5, +.post-body .note h6 { + margin-top: 0; + border-bottom: initial; + margin-bottom: 0; + padding-top: 0; +} +.post-body .note p:first-child, +.post-body .note ul:first-child, +.post-body .note ol:first-child, +.post-body .note table:first-child, +.post-body .note pre:first-child, +.post-body .note blockquote:first-child, +.post-body .note img:first-child { + margin-top: 0; +} +.post-body .note p:last-child, +.post-body .note ul:last-child, +.post-body .note ol:last-child, +.post-body .note table:last-child, +.post-body .note pre:last-child, +.post-body .note blockquote:last-child, +.post-body .note img:last-child { + margin-bottom: 0; +} +.post-body .note.default { + border-left-color: #777; +} +.post-body .note.default h2, +.post-body .note.default h3, +.post-body .note.default h4, +.post-body .note.default h5, +.post-body .note.default h6 { + color: #777; +} +.post-body .note.primary { + border-left-color: #6f42c1; +} +.post-body .note.primary h2, +.post-body .note.primary h3, +.post-body .note.primary h4, +.post-body .note.primary h5, +.post-body .note.primary h6 { + color: #6f42c1; +} +.post-body .note.info { + border-left-color: #428bca; +} +.post-body .note.info h2, +.post-body .note.info h3, +.post-body .note.info h4, +.post-body .note.info h5, +.post-body .note.info h6 { + color: #428bca; +} +.post-body .note.success { + border-left-color: #5cb85c; +} +.post-body .note.success h2, +.post-body .note.success h3, +.post-body .note.success h4, +.post-body .note.success h5, +.post-body .note.success h6 { + color: #5cb85c; +} +.post-body .note.warning { + border-left-color: #f0ad4e; +} +.post-body .note.warning h2, +.post-body .note.warning h3, +.post-body .note.warning h4, +.post-body .note.warning h5, +.post-body .note.warning h6 { + color: #f0ad4e; +} +.post-body .note.danger { + border-left-color: #d9534f; +} +.post-body .note.danger h2, +.post-body .note.danger h3, +.post-body .note.danger h4, +.post-body .note.danger h5, +.post-body .note.danger h6 { + color: #d9534f; +} +.pagination .prev, +.pagination .next, +.pagination .page-number, +.pagination .space { + display: inline-block; + margin: 0 10px; + padding: 0 11px; + position: relative; + top: -1px; +} +@media (max-width: 767px) { + .pagination .prev, + .pagination .next, + .pagination .page-number, + .pagination .space { + margin: 0 5px; + } +} +.pagination { + border-top: 1px solid #eee; + margin: 120px 0 0; + text-align: center; +} +.pagination .prev, +.pagination .next, +.pagination .page-number { + border-bottom: 0; + border-top: 1px solid #eee; + transition-property: border-color; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.pagination .prev:hover, +.pagination .next:hover, +.pagination .page-number:hover { + border-top-color: #222; +} +.pagination .space { + margin: 0; + padding: 0; +} +.pagination .prev { + margin-left: 0; +} +.pagination .next { + margin-right: 0; +} +.pagination .page-number.current, +.algolia-pagination .current .page-number { + background: #ccc; + border-top-color: #ccc; + color: #fff; +} +@media (max-width: 767px) { + .pagination { + border-top: none; + } + .pagination .prev, + .pagination .next, + .pagination .page-number { + border-bottom: 1px solid #eee; + border-top: 0; + margin-bottom: 10px; + padding: 0 10px; + } + .pagination .prev:hover, + .pagination .next:hover, + .pagination .page-number:hover { + border-bottom-color: #222; + } +} +.comments { + margin-top: 60px; + overflow: hidden; +} +.comment-button-group { + 
display: flex; + flex-wrap: wrap-reverse; + justify-content: center; + margin: 1em 0; +} +.comment-button-group .comment-button { + margin: 0.1em 0.2em; +} +.comment-button-group .comment-button.active { + background: var(--btn-default-hover-bg); + border-color: var(--btn-default-hover-border-color); + color: var(--btn-default-hover-color); +} +.comment-position { + display: none; +} +.comment-position.active { + display: block; +} +.tabs-comment { + background: var(--content-bg-color); + margin-top: 4em; + padding-top: 0; +} +.tabs-comment .comments { + border: 0; + box-shadow: none; + margin-top: 0; + padding-top: 0; +} +.container { + min-height: 100%; + position: relative; +} +.main-inner { + margin: 0 auto; + width: 700px; +} +@media (min-width: 1200px) { + .main-inner { + width: 800px; + } +} +@media (min-width: 1600px) { + .main-inner { + width: 900px; + } +} +@media (max-width: 767px) { + .content-wrap { + padding: 0 20px; + } +} +.header { + background: transparent; +} +.header-inner { + margin: 0 auto; + width: 700px; +} +@media (min-width: 1200px) { + .header-inner { + width: 800px; + } +} +@media (min-width: 1600px) { + .header-inner { + width: 900px; + } +} +.site-brand-container { + display: flex; + flex-shrink: 0; + padding: 0 10px; +} +.headband { + background: #222; + height: 3px; +} +.site-meta { + flex-grow: 1; + text-align: center; +} +@media (max-width: 767px) { + .site-meta { + text-align: center; + } +} +.brand { + border-bottom: none; + color: var(--brand-color); + display: inline-block; + line-height: 1.375em; + padding: 0 40px; + position: relative; +} +.brand:hover { + color: var(--brand-hover-color); +} +.site-title { + font-family: 'Lato', "PingFang SC", "Microsoft YaHei", sans-serif; + font-size: 1.375em; + font-weight: normal; + margin: 0; +} +.site-subtitle { + color: #999; + font-size: 0.8125em; + margin: 10px 0; +} +.use-motion .brand { + opacity: 0; +} +.use-motion .site-title, +.use-motion .site-subtitle, +.use-motion .custom-logo-image { + opacity: 0; + position: relative; + top: -10px; +} +.site-nav-toggle, +.site-nav-right { + display: none; +} +@media (max-width: 767px) { + .site-nav-toggle, + .site-nav-right { + display: flex; + flex-direction: column; + justify-content: center; + } +} +.site-nav-toggle .toggle, +.site-nav-right .toggle { + color: var(--text-color); + padding: 10px; + width: 22px; +} +.site-nav-toggle .toggle .toggle-line, +.site-nav-right .toggle .toggle-line { + background: var(--text-color); + border-radius: 1px; +} +.site-nav { + display: block; +} +@media (max-width: 767px) { + .site-nav { + clear: both; + display: none; + } +} +.site-nav.site-nav-on { + display: block; +} +.menu { + margin-top: 20px; + padding-left: 0; + text-align: center; +} +.menu-item { + display: inline-block; + list-style: none; + margin: 0 10px; +} +@media (max-width: 767px) { + .menu-item { + display: block; + margin-top: 10px; + } + .menu-item.menu-item-search { + display: none; + } +} +.menu-item a, +.menu-item span.exturl { + border-bottom: 0; + display: block; + font-size: 0.8125em; + transition-property: border-color; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +@media (hover: none) { + .menu-item a:hover, + .menu-item span.exturl:hover { + border-bottom-color: transparent !important; + } +} +.menu-item .fa, +.menu-item .fab, +.menu-item .far, +.menu-item .fas { + margin-right: 8px; +} +.menu-item .badge { + display: inline-block; + font-weight: bold; + line-height: 1; + margin-left: 0.35em; + 
margin-top: 0.35em; + text-align: center; + white-space: nowrap; +} +@media (max-width: 767px) { + .menu-item .badge { + float: right; + margin-left: 0; + } +} +.menu-item-active a, +.menu .menu-item a:hover, +.menu .menu-item span.exturl:hover { + background: var(--menu-item-bg-color); +} +.use-motion .menu-item { + opacity: 0; +} +.sidebar { + background: #222; + bottom: 0; + box-shadow: inset 0 2px 6px #000; + position: fixed; + top: 0; +} +@media (max-width: 991px) { + .sidebar { + display: none; + } +} +.sidebar-inner { + color: #999; + padding: 18px 10px; + text-align: center; +} +.cc-license { + margin-top: 10px; + text-align: center; +} +.cc-license .cc-opacity { + border-bottom: none; + opacity: 0.7; +} +.cc-license .cc-opacity:hover { + opacity: 0.9; +} +.cc-license img { + display: inline-block; +} +.site-author-image { + border: 2px solid #333; + display: block; + margin: 0 auto; + max-width: 96px; + padding: 2px; +} +.site-author-name { + color: #f5f5f5; + font-weight: normal; + margin: 5px 0 0; + text-align: center; +} +.site-description { + color: #999; + font-size: 1em; + margin-top: 5px; + text-align: center; +} +.links-of-author { + margin-top: 15px; +} +.links-of-author a, +.links-of-author span.exturl { + border-bottom-color: #555; + display: inline-block; + font-size: 0.8125em; + margin-bottom: 10px; + margin-right: 10px; + vertical-align: middle; +} +.links-of-author a::before, +.links-of-author span.exturl::before { + background: #bb6ec1; + border-radius: 50%; + content: ' '; + display: inline-block; + height: 4px; + margin-right: 3px; + vertical-align: middle; + width: 4px; +} +.sidebar-button { + margin-top: 15px; +} +.sidebar-button a { + border: 1px solid #fc6423; + border-radius: 4px; + color: #fc6423; + display: inline-block; + padding: 0 15px; +} +.sidebar-button a .fa, +.sidebar-button a .fab, +.sidebar-button a .far, +.sidebar-button a .fas { + margin-right: 5px; +} +.sidebar-button a:hover { + background: #fc6423; + border: 1px solid #fc6423; + color: #fff; +} +.sidebar-button a:hover .fa, +.sidebar-button a:hover .fab, +.sidebar-button a:hover .far, +.sidebar-button a:hover .fas { + color: #fff; +} +.links-of-blogroll { + font-size: 0.8125em; + margin-top: 10px; +} +.links-of-blogroll-title { + font-size: 0.875em; + font-weight: 600; + margin-top: 0; +} +.links-of-blogroll-list { + list-style: none; + margin: 0; + padding: 0; +} +#sidebar-dimmer { + display: none; +} +@media (max-width: 767px) { + #sidebar-dimmer { + background: #000; + display: block; + height: 100%; + left: 100%; + opacity: 0; + position: fixed; + top: 0; + width: 100%; + z-index: 1100; + } + .sidebar-active + #sidebar-dimmer { + opacity: 0.7; + transform: translateX(-100%); + transition: opacity 0.5s; + } +} +.sidebar-nav { + margin: 0; + padding-bottom: 20px; + padding-left: 0; +} +.sidebar-nav li { + border-bottom: 1px solid transparent; + color: #666; + cursor: pointer; + display: inline-block; + font-size: 0.875em; +} +.sidebar-nav li.sidebar-nav-overview { + margin-left: 10px; +} +.sidebar-nav li:hover { + color: #f5f5f5; +} +.sidebar-nav .sidebar-nav-active { + border-bottom-color: #87daff; + color: #87daff; +} +.sidebar-nav .sidebar-nav-active:hover { + color: #87daff; +} +.sidebar-panel { + display: none; + overflow-x: hidden; + overflow-y: auto; +} +.sidebar-panel-active { + display: block; +} +.sidebar-toggle { + background: #222; + bottom: 45px; + cursor: pointer; + height: 14px; + left: 30px; + padding: 5px; + position: fixed; + width: 14px; + z-index: 1300; +} +@media 
(max-width: 991px) { + .sidebar-toggle { + left: 20px; + opacity: 0.8; + display: none; + } +} +.sidebar-toggle:hover .toggle-line { + background: #87daff; +} +.post-toc { + font-size: 0.875em; +} +.post-toc ol { + list-style: none; + margin: 0; + padding: 0 2px 5px 10px; + text-align: left; +} +.post-toc ol > ol { + padding-left: 0; +} +.post-toc ol a { + transition-property: all; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.post-toc .nav-item { + line-height: 1.8; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} +.post-toc .nav .nav-child { + display: none; +} +.post-toc .nav .active > .nav-child { + display: block; +} +.post-toc .nav .active-current > .nav-child { + display: block; +} +.post-toc .nav .active-current > .nav-child > .nav-item { + display: block; +} +.post-toc .nav .active > a { + border-bottom-color: #87daff; + color: #87daff; +} +.post-toc .nav .active-current > a { + color: #87daff; +} +.post-toc .nav .active-current > a:hover { + color: #87daff; +} +.site-state { + display: flex; + justify-content: center; + line-height: 1.4; + margin-top: 10px; + overflow: hidden; + text-align: center; + white-space: nowrap; +} +.site-state-item { + padding: 0 15px; +} +.site-state-item:not(:first-child) { + border-left: 1px solid #333; +} +.site-state-item a { + border-bottom: none; +} +.site-state-item-count { + display: block; + font-size: 1.25em; + font-weight: 600; + text-align: center; +} +.site-state-item-name { + color: inherit; + font-size: 0.875em; +} +.footer { + color: #999; + font-size: 0.875em; + padding: 20px 0; +} +.footer.footer-fixed { + bottom: 0; + left: 0; + position: absolute; + right: 0; +} +.footer-inner { + box-sizing: border-box; + margin: 0 auto; + text-align: center; + width: 700px; +} +@media (min-width: 1200px) { + .footer-inner { + width: 800px; + } +} +@media (min-width: 1600px) { + .footer-inner { + width: 900px; + } +} +.languages { + display: inline-block; + font-size: 1.125em; + position: relative; +} +.languages .lang-select-label span { + margin: 0 0.5em; +} +.languages .lang-select { + height: 100%; + left: 0; + opacity: 0; + position: absolute; + top: 0; + width: 100%; +} +.with-love { + color: #ff0000; + display: inline-block; + margin: 0 5px; +} +.powered-by, +.theme-info { + display: inline-block; +} +@-moz-keyframes iconAnimate { + 0%, 100% { + transform: scale(1); + } + 10%, 30% { + transform: scale(0.9); + } + 20%, 40%, 60%, 80% { + transform: scale(1.1); + } + 50%, 70% { + transform: scale(1.1); + } +} +@-webkit-keyframes iconAnimate { + 0%, 100% { + transform: scale(1); + } + 10%, 30% { + transform: scale(0.9); + } + 20%, 40%, 60%, 80% { + transform: scale(1.1); + } + 50%, 70% { + transform: scale(1.1); + } +} +@-o-keyframes iconAnimate { + 0%, 100% { + transform: scale(1); + } + 10%, 30% { + transform: scale(0.9); + } + 20%, 40%, 60%, 80% { + transform: scale(1.1); + } + 50%, 70% { + transform: scale(1.1); + } +} +@keyframes iconAnimate { + 0%, 100% { + transform: scale(1); + } + 10%, 30% { + transform: scale(0.9); + } + 20%, 40%, 60%, 80% { + transform: scale(1.1); + } + 50%, 70% { + transform: scale(1.1); + } +} +.back-to-top { + font-size: 12px; + text-align: center; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.back-to-top { + background: #222; + bottom: -100px; + box-sizing: border-box; + color: #fff; + cursor: pointer; + left: 30px; + opacity: 1; + padding: 0 6px; + position: fixed; + 
transition-property: bottom; + z-index: 1300; + width: 24px; +} +.back-to-top span { + display: none; +} +.back-to-top:hover { + color: #87daff; +} +.back-to-top.back-to-top-on { + bottom: 19px; +} +@media (max-width: 991px) { + .back-to-top { + left: 20px; + opacity: 0.8; + } +} +.post-body { + font-family: 'Lato', "PingFang SC", "Microsoft YaHei", sans-serif; + overflow-wrap: break-word; + word-wrap: break-word; +} +@media (min-width: 1200px) { + .post-body { + font-size: 1.125em; + } +} +.post-body .exturl .fa { + font-size: 0.875em; + margin-left: 4px; +} +.post-body .image-caption, +.post-body .figure .caption { + color: #999; + font-size: 0.875em; + font-weight: bold; + line-height: 1; + margin: -20px auto 15px; + text-align: center; +} +.post-sticky-flag { + display: inline-block; + transform: rotate(30deg); +} +.post-button { + margin-top: 40px; + text-align: center; +} +.use-motion .post-block, +.use-motion .pagination, +.use-motion .comments { + opacity: 0; +} +.use-motion .post-header { + opacity: 0; +} +.use-motion .post-body { + opacity: 0; +} +.use-motion .collection-header { + opacity: 0; +} +.posts-collapse { + margin-left: 35px; + position: relative; +} +@media (max-width: 767px) { + .posts-collapse { + margin-left: 0px; + margin-right: 0px; + } +} +.posts-collapse .collection-title { + font-size: 1.125em; + position: relative; +} +.posts-collapse .collection-title::before { + background: #999; + border: 1px solid #fff; + border-radius: 50%; + content: ' '; + height: 10px; + left: 0; + margin-left: -6px; + margin-top: -4px; + position: absolute; + top: 50%; + width: 10px; +} +.posts-collapse .collection-year { + font-size: 1.5em; + font-weight: bold; + margin: 60px 0; + position: relative; +} +.posts-collapse .collection-year::before { + background: #bbb; + border-radius: 50%; + content: ' '; + height: 8px; + left: 0; + margin-left: -4px; + margin-top: -4px; + position: absolute; + top: 50%; + width: 8px; +} +.posts-collapse .collection-header { + display: block; + margin: 0 0 0 20px; +} +.posts-collapse .collection-header small { + color: #bbb; + margin-left: 5px; +} +.posts-collapse .post-header { + border-bottom: 1px dashed #ccc; + margin: 30px 0; + padding-left: 15px; + position: relative; + transition-property: border; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.posts-collapse .post-header::before { + background: #bbb; + border: 1px solid #fff; + border-radius: 50%; + content: ' '; + height: 6px; + left: 0; + margin-left: -4px; + position: absolute; + top: 0.75em; + transition-property: background; + width: 6px; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.posts-collapse .post-header:hover { + border-bottom-color: #666; +} +.posts-collapse .post-header:hover::before { + background: #222; +} +.posts-collapse .post-meta { + display: inline; + font-size: 0.75em; + margin-right: 10px; +} +.posts-collapse .post-title { + display: inline; +} +.posts-collapse .post-title a, +.posts-collapse .post-title span.exturl { + border-bottom: none; + color: var(--link-color); +} +.posts-collapse .post-title .fa-external-link-alt { + font-size: 0.875em; + margin-left: 5px; +} +.posts-collapse::before { + background: #f5f5f5; + content: ' '; + height: 100%; + left: 0; + margin-left: -2px; + position: absolute; + top: 1.25em; + width: 4px; +} +.post-eof { + background: #ccc; + height: 1px; + margin: 80px auto 60px; + text-align: center; + width: 8%; +} 
+.post-block:last-of-type .post-eof { + display: none; +} +.content { + padding-top: 40px; +} +@media (min-width: 992px) { + .post-body { + text-align: justify; + } +} +@media (max-width: 991px) { + .post-body { + text-align: justify; + } +} +.post-body h1, +.post-body h2, +.post-body h3, +.post-body h4, +.post-body h5, +.post-body h6 { + padding-top: 10px; +} +.post-body h1 .header-anchor, +.post-body h2 .header-anchor, +.post-body h3 .header-anchor, +.post-body h4 .header-anchor, +.post-body h5 .header-anchor, +.post-body h6 .header-anchor { + border-bottom-style: none; + color: #ccc; + float: right; + margin-left: 10px; + visibility: hidden; +} +.post-body h1 .header-anchor:hover, +.post-body h2 .header-anchor:hover, +.post-body h3 .header-anchor:hover, +.post-body h4 .header-anchor:hover, +.post-body h5 .header-anchor:hover, +.post-body h6 .header-anchor:hover { + color: inherit; +} +.post-body h1:hover .header-anchor, +.post-body h2:hover .header-anchor, +.post-body h3:hover .header-anchor, +.post-body h4:hover .header-anchor, +.post-body h5:hover .header-anchor, +.post-body h6:hover .header-anchor { + visibility: visible; +} +.post-body iframe, +.post-body img, +.post-body video { + margin-bottom: 20px; +} +.post-body .video-container { + height: 0; + margin-bottom: 20px; + overflow: hidden; + padding-top: 75%; + position: relative; + width: 100%; +} +.post-body .video-container iframe, +.post-body .video-container object, +.post-body .video-container embed { + height: 100%; + left: 0; + margin: 0; + position: absolute; + top: 0; + width: 100%; +} +.post-gallery { + align-items: center; + display: grid; + grid-gap: 10px; + grid-template-columns: 1fr 1fr 1fr; + margin-bottom: 20px; +} +@media (max-width: 767px) { + .post-gallery { + grid-template-columns: 1fr 1fr; + } +} +.post-gallery a { + border: 0; +} +.post-gallery img { + margin: 0; +} +.posts-expand .post-header { + font-size: 1.125em; +} +.posts-expand .post-title { + font-size: 1.5em; + font-weight: normal; + margin: initial; + text-align: center; + overflow-wrap: break-word; + word-wrap: break-word; +} +.posts-expand .post-title-link { + border-bottom: none; + color: var(--link-color); + display: inline-block; + position: relative; + vertical-align: top; +} +.posts-expand .post-title-link::before { + background: var(--link-color); + bottom: 0; + content: ''; + height: 2px; + left: 0; + position: absolute; + transform: scaleX(0); + visibility: hidden; + width: 100%; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.posts-expand .post-title-link:hover::before { + transform: scaleX(1); + visibility: visible; +} +.posts-expand .post-title-link .fa-external-link-alt { + font-size: 0.875em; + margin-left: 5px; +} +.posts-expand .post-meta { + color: #999; + font-family: 'Lato', "PingFang SC", "Microsoft YaHei", sans-serif; + font-size: 0.75em; + margin: 3px 0 60px 0; + text-align: center; +} +.posts-expand .post-meta .post-description { + font-size: 0.875em; + margin-top: 2px; +} +.posts-expand .post-meta time { + border-bottom: 1px dashed #999; + cursor: pointer; +} +.post-meta .post-meta-item + .post-meta-item::before { + content: '|'; + margin: 0 0.5em; +} +.post-meta-divider { + margin: 0 0.5em; +} +.post-meta-item-icon { + margin-right: 3px; +} +@media (max-width: 991px) { + .post-meta-item-icon { + display: inline-block; + } +} +@media (max-width: 991px) { + .post-meta-item-text { + display: none; + } +} +.post-nav { + border-top: 1px solid #eee; + display: flex; + 
justify-content: space-between; + margin-top: 15px; + padding: 10px 5px 0; +} +.post-nav-item { + flex: 1; +} +.post-nav-item a { + border-bottom: none; + display: block; + font-size: 0.875em; + line-height: 1.6; + position: relative; +} +.post-nav-item a:active { + top: 2px; +} +.post-nav-item .fa { + font-size: 0.75em; +} +.post-nav-item:first-child { + margin-right: 15px; +} +.post-nav-item:first-child .fa { + margin-right: 5px; +} +.post-nav-item:last-child { + margin-left: 15px; + text-align: right; +} +.post-nav-item:last-child .fa { + margin-left: 5px; +} +.rtl.post-body p, +.rtl.post-body a, +.rtl.post-body h1, +.rtl.post-body h2, +.rtl.post-body h3, +.rtl.post-body h4, +.rtl.post-body h5, +.rtl.post-body h6, +.rtl.post-body li, +.rtl.post-body ul, +.rtl.post-body ol { + direction: rtl; + font-family: UKIJ Ekran; +} +.rtl.post-title { + font-family: UKIJ Ekran; +} +.post-tags { + margin-top: 40px; + text-align: center; +} +.post-tags a { + display: inline-block; + font-size: 0.8125em; +} +.post-tags a:not(:last-child) { + margin-right: 10px; +} +.post-widgets { + border-top: 1px solid #eee; + margin-top: 15px; + text-align: center; +} +.wp_rating { + height: 20px; + line-height: 20px; + margin-top: 10px; + padding-top: 6px; + text-align: center; +} +.social-like { + display: flex; + font-size: 0.875em; + justify-content: center; + text-align: center; +} +.reward-container { + margin: 20px auto; + padding: 10px 0; + text-align: center; + width: 90%; +} +.reward-container button { + background: transparent; + border: 1px solid #fc6423; + border-radius: 0; + color: #fc6423; + cursor: pointer; + line-height: 2; + outline: 0; + padding: 0 15px; + vertical-align: text-top; +} +.reward-container button:hover { + background: #fc6423; + border: 1px solid transparent; + color: #fa9366; +} +#qr { + padding-top: 20px; +} +#qr a { + border: 0; +} +#qr img { + display: inline-block; + margin: 0.8em 2em 0 2em; + max-width: 100%; + width: 180px; +} +#qr p { + text-align: center; +} +.category-all-page .category-all-title { + text-align: center; +} +.category-all-page .category-all { + margin-top: 20px; +} +.category-all-page .category-list { + list-style: none; + margin: 0; + padding: 0; +} +.category-all-page .category-list-item { + margin: 5px 10px; +} +.category-all-page .category-list-count { + color: #bbb; +} +.category-all-page .category-list-count::before { + content: ' ('; + display: inline; +} +.category-all-page .category-list-count::after { + content: ') '; + display: inline; +} +.category-all-page .category-list-child { + padding-left: 10px; +} +.event-list { + padding: 0; +} +.event-list hr { + background: #222; + margin: 20px 0 45px 0; +} +.event-list hr::after { + background: #222; + color: #fff; + content: 'NOW'; + display: inline-block; + font-weight: bold; + padding: 0 5px; + text-align: right; +} +.event-list .event { + background: #222; + margin: 20px 0; + min-height: 40px; + padding: 15px 0 15px 10px; +} +.event-list .event .event-summary { + color: #fff; + margin: 0; + padding-bottom: 3px; +} +.event-list .event .event-summary::before { + animation: dot-flash 1s alternate infinite ease-in-out; + color: #fff; + content: '\f111'; + display: inline-block; + font-size: 10px; + margin-right: 25px; + vertical-align: middle; + font-family: 'Font Awesome 5 Free'; + font-weight: 900; +} +.event-list .event .event-relative-time { + color: #bbb; + display: inline-block; + font-size: 12px; + font-weight: normal; + padding-left: 12px; +} +.event-list .event .event-details { + color: #fff; 
+ display: block; + line-height: 18px; + margin-left: 56px; + padding-bottom: 6px; + padding-top: 3px; + text-indent: -24px; +} +.event-list .event .event-details::before { + color: #fff; + display: inline-block; + margin-right: 9px; + text-align: center; + text-indent: 0; + width: 14px; + font-family: 'Font Awesome 5 Free'; + font-weight: 900; +} +.event-list .event .event-details.event-location::before { + content: '\f041'; +} +.event-list .event .event-details.event-duration::before { + content: '\f017'; +} +.event-list .event-past { + background: #f5f5f5; +} +.event-list .event-past .event-summary, +.event-list .event-past .event-details { + color: #bbb; + opacity: 0.9; +} +.event-list .event-past .event-summary::before, +.event-list .event-past .event-details::before { + animation: none; + color: #bbb; +} +@-moz-keyframes dot-flash { + from { + opacity: 1; + transform: scale(1); + } + to { + opacity: 0; + transform: scale(0.8); + } +} +@-webkit-keyframes dot-flash { + from { + opacity: 1; + transform: scale(1); + } + to { + opacity: 0; + transform: scale(0.8); + } +} +@-o-keyframes dot-flash { + from { + opacity: 1; + transform: scale(1); + } + to { + opacity: 0; + transform: scale(0.8); + } +} +@keyframes dot-flash { + from { + opacity: 1; + transform: scale(1); + } + to { + opacity: 0; + transform: scale(0.8); + } +} +ul.breadcrumb { + font-size: 0.75em; + list-style: none; + margin: 1em 0; + padding: 0 2em; + text-align: center; +} +ul.breadcrumb li { + display: inline; +} +ul.breadcrumb li + li::before { + content: '/\00a0'; + font-weight: normal; + padding: 0.5em; +} +ul.breadcrumb li + li:last-child { + font-weight: bold; +} +.tag-cloud { + text-align: center; +} +.tag-cloud a { + display: inline-block; + margin: 10px; +} +.tag-cloud a:hover { + color: var(--link-hover-color) !important; +} +.search-pop-overlay { + background: rgba(0,0,0,0); + height: 100%; + left: 0; + position: fixed; + top: 0; + transition: visibility 0s linear 0.2s, background 0.2s; + visibility: hidden; + width: 100%; + z-index: 1400; +} +.search-pop-overlay.search-active { + background: rgba(0,0,0,0.3); + transition: background 0.2s; + visibility: visible; +} +.search-popup { + background: var(--card-bg-color); + border-radius: 5px; + height: 80%; + left: calc(50% - 350px); + position: fixed; + top: 10%; + transform: scale(0); + transition: transform 0.2s; + width: 700px; + z-index: 1500; +} +.search-active .search-popup { + transform: scale(1); +} +@media (max-width: 767px) { + .search-popup { + border-radius: 0; + height: 100%; + left: 0; + margin: 0; + top: 0; + width: 100%; + } +} +.search-popup .search-icon, +.search-popup .popup-btn-close { + color: #999; + font-size: 18px; + padding: 0 10px; +} +.search-popup .popup-btn-close { + cursor: pointer; +} +.search-popup .popup-btn-close:hover .fa { + color: #222; +} +.search-popup .search-header { + background: #eee; + border-top-left-radius: 5px; + border-top-right-radius: 5px; + display: flex; + padding: 5px; +} +.search-popup input.search-input { + background: transparent; + border: 0; + outline: 0; + width: 100%; +} +.search-popup input.search-input::-webkit-search-cancel-button { + display: none; +} +.search-input-container { + flex-grow: 1; +} +.search-input-container form { + padding: 2px; +} +.algolia-powered { + float: right; +} +.algolia-powered img { + display: inline-block; + height: 18px; + vertical-align: middle; +} +.algolia-results { + height: calc(100% - 55px); + overflow: auto; + padding: 5px 30px; +} +.algolia-results hr { + margin: 
10px 0; +} +.algolia-hit-item { + margin: 15px 0; +} +.algolia-hit-item-link { + border-bottom: 1px dashed #ccc; + display: block; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.algolia-pagination .pagination { + border-top: none; + margin: 40px 0; + opacity: 1; + padding: 0; +} +.algolia-pagination .pagination-item { + display: inline-block; +} +.algolia-pagination .page-number { + border-top: 1px solid transparent; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-in-out; +} +.algolia-pagination .page-number:hover { + border-top: 1px solid #222; +} +.algolia-pagination .current .page-number { + cursor: default; +} +.algolia-pagination .current .page-number:hover { + border-top-color: #ccc; +} +.algolia-pagination .disabled-item { + visibility: hidden; +} +@media (max-width: 767px) { + .header-inner, + .main-inner, + .footer-inner { + width: auto; + } +} +.header-inner { + padding-top: 100px; +} +@media (max-width: 767px) { + .header-inner { + padding-top: 50px; + } +} +.main-inner { + padding-bottom: 60px; +} +.content { + padding-top: 70px; +} +@media (max-width: 767px) { + .content { + padding-top: 35px; + } +} +embed { + display: block; + margin: 0 auto 25px auto; +} +.custom-logo .site-meta-headline { + text-align: center; +} +.custom-logo .site-title { + color: #222; + margin: 10px auto 0; +} +.custom-logo .site-title a { + border: 0; +} +.custom-logo-image { + background: #fff; + margin: 0 auto; + max-width: 150px; + padding: 5px; +} +.brand { + background: var(--btn-default-bg); +} +@media (max-width: 767px) { + .site-nav { + border-bottom: 1px solid #ddd; + border-top: 1px solid #ddd; + left: 0; + margin: 0; + padding: 0; + width: 100%; + } +} +@media (max-width: 767px) { + .menu { + text-align: left; + } +} +.menu-item-active a, +.menu .menu-item a:hover, +.menu .menu-item span.exturl:hover { + background: transparent; + border-bottom: 1px solid var(--link-hover-color) !important; +} +@media (max-width: 767px) { + .menu-item-active a, + .menu .menu-item a:hover, + .menu .menu-item span.exturl:hover { + border-bottom: 1px dotted #ddd !important; + } +} +@media (max-width: 767px) { + .menu .menu-item { + margin: 0 10px; + } +} +.menu .menu-item a, +.menu .menu-item span.exturl { + border-bottom: 1px solid transparent; +} +@media (max-width: 767px) { + .menu .menu-item a, + .menu .menu-item span.exturl { + padding: 5px 10px; + } +} +@media (min-width: 768px) { + .menu .menu-item .fa, + .menu .menu-item .fab, + .menu .menu-item .far, + .menu .menu-item .fas { + display: block; + line-height: 2; + margin-right: 0; + width: 100%; + } +} +.menu .menu-item .badge { + background: #eee; + padding: 1px 4px; +} +.sub-menu { + margin: 10px 0; +} +.sub-menu .menu-item { + display: inline-block; +} +.sidebar { + left: -320px; +} +.sidebar.sidebar-active { + left: 0; +} +.sidebar { + width: 320px; + z-index: 1200; + transition-delay: 0s; + transition-duration: 0.2s; + transition-timing-function: ease-out; +} +.sidebar a, +.sidebar span.exturl { + border-bottom-color: #555; + color: #999; +} +.sidebar a:hover, +.sidebar span.exturl:hover { + border-bottom-color: #eee; + color: #eee; +} +.links-of-blogroll-item { + padding: 2px 10px; +} +.links-of-blogroll-item a, +.links-of-blogroll-item span.exturl { + box-sizing: border-box; + display: inline-block; + max-width: 280px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} diff --git a/images/algolia_logo.svg b/images/algolia_logo.svg 
new file mode 100644 index 0000000000..470242341d --- /dev/null +++ b/images/algolia_logo.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/images/apple-touch-icon-next.png b/images/apple-touch-icon-next.png new file mode 100644 index 0000000000000000000000000000000000000000..86a0d1d33bc2ae8a0416ebba67d1bbb60aa29c38 GIT binary patch literal 1544 zcmV+j2KV`iP)Px#OHfQyMIs_1FE20E)zv2_C(h2!$jHdT!ovUm|GT@p zWo2de_xJq#{8d#|IyyQ>M@Qx533`J9dBE>4W{r{KFjMLpp4N$2iMRJCGfPM7@le{)BPzVgz zp?ByVdWYVj|4-=tUS5SkR}Te{-Fr#UrxsqJ9+ed6YY9KmoXq6n^B9DRMwF@T2u)qH z!=wh-sAdPH_8fFy0-I1d68Zp);!FTtdweUZc2nr)?5^hh99w*FOU z@NYFT^!v%iPrGly0Q*+sLVtesYh+the|AET4WK8lR^dv1IZ-3FhAuiK5V|-?gf2#j z&_yW`x^K3G&OKl4kP$$ih@p!?B6M+)2(4Rz(6$=G6WT(;6uP)cgkEi(xI(Yi7`D(w zo(Y6r>FyarFV`4e&^@lk@PfWf|DSeM5}?agV`>aDXt-c%3^!*Ij0g0Q8X5*mjbRC$9oB0MOK51O_53LU8p`RzcozW; zx3wC>721@58xhdzx?E$}LW9xpjUu4UhOZO>Z8Us+D+1bR_}&sQXJ}B}yadb|8fraT zV|YUw4WF(>KpPF;mjp}*eTo5XG<+u!&}PFoihwq!uhI`XPYZ1{e1FDs0nqR?Ym5YF zqv0DwK${KUMFjNpqb>9(0vg8Y^d$otmiJc}L#s|8wD~Z~fHtQu8PG6DgwAq-&}PFY z1KMo(WI!7YpAcwxN`zJiV(2pgG_<|npE+y^Z8UsBppAx42(;Po$$&N*J{izP!?y+W zi2YsL=fkK`KXsymhWcmL#_y-S*Y&V_u#us^S9pDvbMSkxv7uidoc36Qa`CQ+5AC^d z=CTJ%0ZkrQ=v?t(E_)>OCL8YaU+nX$$--iAUNb;m^jBG2!5*5enU|exmY3aQcv=O- znjO}ft-yw2|VZs2g-W`Epf@n*5gsJ2LLtC^nFlq8+?%Xc!{Z3_|hFI~!oeU$ep z%4jE9Y9n1{ah68@W{++)8|fc~HIM4)QycHpg}3P<_K$M$L~}7jBXs?6bDwjvo+I;D zsoi}398La6+~*osM@45l#ptQ6MrZFz597*no3v)ffsUEletxWeW~`A+ntYvaeQj?e z*kSqGXZsrrhZ>IsxH$({IR{z11zkG`GR^j$#{%z41lp5(Ne#glJc5nj-BUaKR!X?# zzN6F-<0?&z;|30?9q0Z}oHG*G)Q)s%Db~RbjMU)mp-u&Dmm0zkF~o9eRh}p?ByVnjn4wmEZ*c=^{+H00012dQ@0+Qek%>aB^>E zX>4U6ba`-PAZc)PV*mhnoa6Eg2ys>@D9TUE%t_@^00ScnE@KN5BNI!L6ay0=M1VBI uWCJ6!R3OXP)X2ol#2my2%YaCrN-hBE7ZG&wLN%2D0000`v literal 0 HcmV?d00001 diff --git a/images/avatar.gif b/images/avatar.gif new file mode 100644 index 0000000000000000000000000000000000000000..28411fd0eadae59f521f6914983f61731b759e77 GIT binary patch literal 1793 zcmc(e`#al*0*AjzgfxoGy{05C>!#LYnzk<02ECx3bGphio%U$lDm65-%4+M{#3i&v zF)dm;w^A*|s?pmzaZMtK$W?-bB(5QZNKm$&f8xA9e13kP=kvU$gHL&RM+pD{pap>K z?d^{rKQig`f&P9LlQ}99ZEbD!GZ=ckeqnxIr_;%&rX*sqfX7oR6eGjKTn=YwU|>ul znHV4Eu-TiNo08GdRgGqCZH+xRs9IQ9RH^1>XE!!BhKGj6$Hs&L!K6&KzP>J#N(Fqr zR;wLkv2^R}tE;Qa%ge0(e$C3t(&C~Rwe+~VRQXLwjF5-F8RA)i0YW~)@HafxJI ztDT*l9U2_uak;ZIGkh*rIx(SEs}~j)c6N3ceSN_H8_49XM%yH8dPZgz zB|GQ#ox8bt`S%J6i+(M>|DdF_?BS#GipRfIR#n&3KB=pJ+VJdoW7CV~me${2w!NaZ zzkc)f-TOa2{MkY4?CS36?V~f8{j7n(A@(q5gv;X#grZTgWNdswDw~{=E0ohSvvWmY zGjmfr8#E|v4TQF_Gc`8@&-08hUU*RDV_F8x1dNvf-Q-&Xu=7I9zp8rf8bT6VFu-K1 zp`BZ_y|*WcV(5^u3r8Z?_Lqctc&JNK!cAN~et3&LSM%8m)RAlO)U=qKp>M&~miA0b zXA%^BkIE!eHSh{;JcHumYCCz&Hv4Tw58W8&TJUDzpa4EMr)`^Hk&pLH2#~xp+Q){T zG-#gq>4cV;j$FAa0T2e?IaPM3FG2TOT?&y331G&9%UI%3h^7{jcjWlZnLQlvaK26P z&Du^Nq0124n0#&HRTS@~)vg(wn2`PSjR|i?^*UZ1csS?N{1geB^pd#*wq=(WWX@1r zv5uJro0nX~&}D6Yd!Wu)zT+Ue>E{=z;@y4ScOiE3p8m&MiDr4no6=S6wRJ0iXM)6C z>5pRjYy47cB%WEd*@}L z)G5pbE$Ge-JTxqUc)`~&KYX(KG|XUNFEaMuwR^Y2g@rcImI_#CkxM7>ixb}G;9?cd z^^|X7xcI>?8NVEgi-@HW4-}Ub7>j!l$Wovi5owxWR~&d9OE??vXD<~+R;ZIoXph72 zS^L1LPNaHdal;oq)mV?S`yd78YGomCf{IVUjRFlwBDq0OKa}J>R-c=kw&xj^j*rW0 zJokM-@lH<&)bi_qsQp$(*Ii?w!M$!>g`Nv;bl`;yk4f(}y&Kco3r_*64>%zjTOCfdB@J*VqOvb=oZa>C@u2Lduq1`izsNs8+QAq_ECS490Gm#@C{iK=v!G1IsXpV z76Y`Dp+T;=aPS1u7-^6eAPsE=Fh7@&a6*rEQ0-lhxp>WU>m<>B4%df*W$pD4< zwi<>)ym@Zqoa-)!J73I;#~fq9mAj64U9-2aXsJvo1GRX7EFSwyF_W3m*mQ`Z=p-3( z>GXe!z@|YJ9AMi5`@}EICbT2b$BaPO()mC;$h}Hdi}&W0Cv1_IH70cDzqaN2JSbZ{ z@pl2olL|1mz}I#g;^OjPiAXY4L|Uj@Cb#<|4jBr+K^v+&jNRfP(-(RnH>Uj+<-koQ zK-&m;?TK*D0A<8f;Q`Fsn4l2KVgu@{NK)X|`%2r)S=kN5!CVj+xSEq1+Eb$0bDoB-XOPkoZY08jeqI*A;y_?flt= 
zh0FOL^(F`4ls>(Et~Gpqs*@n|@qZg1LiHe{HoHzns0oD{ebkq7Snv{|$@>0t7h-12 zVdLd19AD|Po + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/images/cc-by-nc-sa.svg b/images/cc-by-nc-sa.svg new file mode 100644 index 0000000000..bf6bc26f54 --- /dev/null +++ b/images/cc-by-nc-sa.svg @@ -0,0 +1,121 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/images/cc-by-nc.svg b/images/cc-by-nc.svg new file mode 100644 index 0000000000..36973490ad --- /dev/null +++ b/images/cc-by-nc.svg @@ -0,0 +1,121 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/images/cc-by-nd.svg b/images/cc-by-nd.svg new file mode 100644 index 0000000000..934c61e15e --- /dev/null +++ b/images/cc-by-nd.svg @@ -0,0 +1,117 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + diff --git a/images/cc-by-sa.svg b/images/cc-by-sa.svg new file mode 100644 index 0000000000..463276a8cf --- /dev/null +++ b/images/cc-by-sa.svg @@ -0,0 +1,121 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/images/cc-by.svg b/images/cc-by.svg new file mode 100644 index 0000000000..4bccd14f6d --- /dev/null +++ b/images/cc-by.svg @@ -0,0 +1,121 @@ + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + diff --git a/images/cc-zero.svg b/images/cc-zero.svg new file mode 100644 index 0000000000..0f866392f1 --- /dev/null +++ b/images/cc-zero.svg @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/images/favicon-16x16-next.png b/images/favicon-16x16-next.png new file mode 100644 index 0000000000000000000000000000000000000000..de8c5d3a5f8215c3f7b095c9e284e061bda65f86 GIT binary patch literal 435 zcmeAS@N?(olHy`uVBq!ia0vp^0wB!63?wyl`GbKJOS+@4BLl<6e(pbstUx|vage(c z!@6@aFM%A+0*}aIAe|4wj2{=>aRxHH1AIbUeKHFb!9Yn#DLy_vBO^maMdk0`zd#Zw z0tY~@yu3V6p{Ay$mX_Az$B+O0`?r4m`qisfPnj~MwY3$f@^9nqaG!lvI6;x#X;^) z4C~IxyaaMM3p^r=f%IJvW-JI^#13S11^9%x`eYU=fq|TyoQjGHgbgIWfB*jF%NI8{ zx2;>Zu3x`iO-&6bs0aoM3JPUqWq0n}*}Qo(P|Le_@AmKC57(%ytPE5IHwwrGiqzKD z{{8#c)YSCg!Gk}3{P_R>|Gj(no6k4eJyZq^Jd4Jw%*zpM57yY9pK zMcQgs21j>QigM54W!RCVazM0UCtHDVhG46xh5pA!(TqDDnlwLEm~eRS(JM@6p3293 zNMwDW(!P~x!s$%jghh-wOB)P19fJ88oOHECttPa5*~LF^Vp#0%pwrbyN~~|szWB|o z_J4%n&U>rEu5s2%I6jof{A;(j!|>g|;yo?%ZfFGheBJp^9_Zijs*s41pu}>8f};Gi z%$!t(lFEWqh0KDIWCn(cIgdZ_a1@4VXq@stea7=?5CgL^w_Y;0u(GiCWD#az1(ybs x!zs+ln?n>%-?(z($eANDN7zp{cr5VJV|XPlSn|oqbSlsa22WQ%mvv4FO#p=N1nU3* literal 0 HcmV?d00001 diff --git a/images/logo.svg b/images/logo.svg new file mode 100644 index 0000000000..cbb3937ecd --- /dev/null +++ b/images/logo.svg @@ -0,0 +1,23 @@ + +image/svg+xml diff --git a/index.html b/index.html new file mode 100644 index 0000000000..c99875b16e --- /dev/null +++ b/index.html @@ -0,0 +1,1185 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + dinosaur + + + + + + + + + + + + +
+
+ +
+
+ + + + + +
+ + + + + + + + +
+ +
+ +
+
+ + +
+ + 0% +
+ + +
+
+
+ + +
+ + + + +
+ + + + + +
+

+ + +

+ + +
+ + + + +
+ + +
Thread 3 "HTTPHandler" hit Breakpoint 2, DB::buildPushingToViewsChain (storage=..., metadata_snapshot=..., context=..., query_ptr=..., no_destination=false, thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0, async_insert=false, live_view_header=...) at /ssd/ClickHouse/src/Processors/Transforms/buildPushingToViewsChain.cpp:307
307 if (lock == nullptr)
(gdb) bt
#0 DB::buildPushingToViewsChain (storage=..., metadata_snapshot=..., context=..., query_ptr=..., no_destination=false, thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0, async_insert=false, live_view_header=...)
at /ssd/ClickHouse/src/Processors/Transforms/buildPushingToViewsChain.cpp:307
#1 0x000000001cc8a4e0 in DB::InterpreterInsertQuery::buildSink (this=0x7ffe5a9e63f0, table=..., metadata_snapshot=..., thread_status_holder=..., running_group=..., elapsed_counter_ms=0x0) at /ssd/ClickHouse/src/Interpreters/InterpreterInsertQuery.cpp:311
#2 0x000000001cc8cbdb in DB::InterpreterInsertQuery::execute (this=0x7ffe5a9e63f0) at /ssd/ClickHouse/src/Interpreters/InterpreterInsertQuery.cpp:554
#3 0x000000001d2d9d4a in DB::executeQueryImpl (begin=0x7ffe5aa40c00 "insert into push.site_sku_user_log_distributed (site_sku , site ,sku_id ,user_id , is_in_cart ) values \r\n( 'us_1' , 'us' , '1' , '2' , '1' )", '\245' <repeats 57 times>...,
end=0x7ffe5aa40c8f '\245' <repeats 113 times>, 'Z' <repeats 87 times>..., context=..., internal=false, stage=DB::QueryProcessingStage::Complete, istr=0x7ffe5a8fc020) at /ssd/ClickHouse/src/Interpreters/executeQuery.cpp:1096
#4 0x000000001d2dbdd1 in DB::executeQuery(DB::ReadBuffer&, DB::WriteBuffer&, bool, std::__1::shared_ptr<DB::Context>, std::__1::function<void (DB::QueryResultDetails const&)>, std::__1::optional<DB::FormatSettings> const&, std::__1::function<void (DB::IOutputFormat&)>) (istr=..., ostr=..., allow_into_outfile=false, context=..., set_result_details=..., output_format_settings=..., handle_exception_in_output_format=...) at /ssd/ClickHouse/src/Interpreters/executeQuery.cpp:1351
#5 0x000000001e7b8f91 in DB::HTTPHandler::processQuery (this=0x7ffe43e3ce80, request=..., params=..., response=..., used_output=..., query_scope=...) at /ssd/ClickHouse/src/Server/HTTPHandler.cpp:884
#6 0x000000001e7bbb67 in DB::HTTPHandler::handleRequest (this=0x7ffe43e3ce80, request=..., response=...) at /ssd/ClickHouse/src/Server/HTTPHandler.cpp:1078
#7 0x000000001e8644dc in DB::HTTPServerConnection::run (this=0x7ffe43e3cd40) at /ssd/ClickHouse/src/Server/HTTP/HTTPServerConnection.cpp:68
#8 0x000000002396f7d9 in Poco::Net::TCPServerConnection::start (this=0x7ffe43e3cd40) at /ssd/ClickHouse/base/poco/Net/src/TCPServerConnection.cpp:43
#9 0x000000002397001c in Poco::Net::TCPServerDispatcher::run (this=0x7ffe5a8c0f00) at /ssd/ClickHouse/base/poco/Net/src/TCPServerDispatcher.cpp:115
#10 0x0000000023b5ac14 in Poco::PooledThread::run (this=0x7ffff71cab80) at /ssd/ClickHouse/base/poco/Foundation/src/ThreadPool.cpp:188
#11 0x0000000023b579ba in Poco::(anonymous namespace)::RunnableHolder::run (this=0x7ffff70019c0) at /ssd/ClickHouse/base/poco/Foundation/src/Thread.cpp:45
#12 0x0000000023b566be in Poco::ThreadImpl::runnableEntry (pThread=0x7ffff71cabb8) at /ssd/ClickHouse/base/poco/Foundation/src/Thread_POSIX.cpp:335
#13 0x00007ffff7c94ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
#14 0x00007ffff7d26a40 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
+ + +

Related reading

+ + +
+ + + + +
+
+
+
+ + + + + + + +
+ + + + + +
+

+ + +

+ + +
+ + + + +
+ + +

Background

Inter-thread communication in Java occasionally relies on wait and notify.
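To ground this, here is a minimal usage sketch in Java (illustrative only; the Mailbox class and its field are made up for this example and are not part of the JVM sources quoted below). A waiting thread must hold the object's monitor, loops on its condition to survive spurious wakeups, and a notifying thread holding the same monitor wakes it:

// Minimal wait/notify sketch (hypothetical example, not from the HotSpot source below)
class Mailbox {
    private String message;                 // shared state guarded by the monitor of `this`

    synchronized String take() throws InterruptedException {
        while (message == null) {           // loop: guards against spurious wakeups
            wait();                         // releases the monitor and parks this thread
        }
        String m = message;
        message = null;
        notifyAll();                        // wake any producer blocked in put()
        return m;
    }

    synchronized void put(String m) throws InterruptedException {
        while (message != null) {
            wait();
        }
        message = m;
        notifyAll();                        // wake any consumer blocked in take()
    }
}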

+

Implementation

Registration:

+
// Register native methods of Object
void java_lang_Object::register_natives(TRAPS) {
InstanceKlass* obj = vmClasses::Object_klass();
Method::register_native(obj, vmSymbols::hashCode_name(),
vmSymbols::void_int_signature(), (address) &JVM_IHashCode, CHECK);
Method::register_native(obj, vmSymbols::wait_name(),
vmSymbols::long_void_signature(), (address) &JVM_MonitorWait, CHECK);
Method::register_native(obj, vmSymbols::notify_name(),
vmSymbols::void_method_signature(), (address) &JVM_MonitorNotify, CHECK);
Method::register_native(obj, vmSymbols::notifyAll_name(),
vmSymbols::void_method_signature(), (address) &JVM_MonitorNotifyAll, CHECK);
Method::register_native(obj, vmSymbols::clone_name(),
vmSymbols::void_object_signature(), (address) &JVM_Clone, THREAD);
}
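One behavioural detail worth keeping in mind before the ObjectMonitor::wait listing below: CHECK_OWNER() there throws IllegalMonitorStateException when the calling thread does not own the monitor, which is exactly what plain Java shows if wait() is called outside a synchronized block. A small sketch (hypothetical demo class, for illustration only):

// Calling wait() without owning the object's monitor fails fast with IllegalMonitorStateException.
public class ImseDemo {
    public static void main(String[] args) throws InterruptedException {
        Object lock = new Object();
        try {
            lock.wait(100);                  // not inside synchronized(lock): caller is not the owner
        } catch (IllegalMonitorStateException e) {
            System.out.println("expected: " + e);
        }
        synchronized (lock) {
            lock.wait(100);                  // owner now; nobody notifies, so the call times out
        }
    }
}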
+ +
// -----------------------------------------------------------------------------
// Wait/Notify/NotifyAll
//
// Note: a subset of changes to ObjectMonitor::wait()
// will need to be replicated in complete_exit
void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
JavaThread* current = THREAD;

assert(InitDone, "Unexpectedly not initialized");

CHECK_OWNER(); // Throws IMSE if not owner.

EventJavaMonitorWait event;

// check for a pending interrupt
if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
// Note: 'false' parameter is passed here because the
// wait was not timed out due to thread interrupt.
JvmtiExport::post_monitor_waited(current, this, false);

// In this short circuit of the monitor wait protocol, the
// current thread never drops ownership of the monitor and
// never gets added to the wait queue so the current thread
// cannot be made the successor. This means that the
// JVMTI_EVENT_MONITOR_WAITED event handler cannot accidentally
// consume an unpark() meant for the ParkEvent associated with
// this ObjectMonitor.
}
if (event.should_commit()) {
post_monitor_wait_event(&event, this, 0, millis, false);
}
THROW(vmSymbols::java_lang_InterruptedException());
return;
}

assert(current->_Stalled == 0, "invariant");
current->_Stalled = intptr_t(this);
current->set_current_waiting_monitor(this);

// create a node to be put into the queue
// Critically, after we reset() the event but prior to park(), we must check
// for a pending interrupt.
ObjectWaiter node(current);
node.TState = ObjectWaiter::TS_WAIT;
current->_ParkEvent->reset();
OrderAccess::fence(); // ST into Event; membar ; LD interrupted-flag

// Enter the waiting queue, which is a circular doubly linked list in this case
// but it could be a priority queue or any data structure.
// _WaitSetLock protects the wait queue. Normally the wait queue is accessed only
// by the the owner of the monitor *except* in the case where park()
// returns because of a timeout of interrupt. Contention is exceptionally rare
// so we use a simple spin-lock instead of a heavier-weight blocking lock.

Thread::SpinAcquire(&_WaitSetLock, "WaitSet - add");
AddWaiter(&node);
Thread::SpinRelease(&_WaitSetLock);

_Responsible = NULL;

intx save = _recursions; // record the old recursion count
_waiters++; // increment the number of waiters
_recursions = 0; // set the recursion level to be 1
exit(current); // exit the monitor
guarantee(owner_raw() != current, "invariant");

// The thread is on the WaitSet list - now park() it.
// On MP systems it's conceivable that a brief spin before we park
// could be profitable.
//
// TODO-FIXME: change the following logic to a loop of the form
// while (!timeout && !interrupted && _notified == 0) park()

int ret = OS_OK;
int WasNotified = 0;

// Need to check interrupt state whilst still _thread_in_vm
bool interrupted = interruptible && current->is_interrupted(false);

{ // State transition wrappers
OSThread* osthread = current->osthread();
OSThreadWaitState osts(osthread, true);

assert(current->thread_state() == _thread_in_vm, "invariant");

{
ClearSuccOnSuspend csos(this);
ThreadBlockInVMPreprocess<ClearSuccOnSuspend> tbivs(current, csos, true /* allow_suspend */);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
} else if (node._notified == 0) {
if (millis <= 0) {
current->_ParkEvent->park();
} else {
ret = current->_ParkEvent->park(millis);
}
}
}

// Node may be on the WaitSet, the EntryList (or cxq), or in transition
// from the WaitSet to the EntryList.
// See if we need to remove Node from the WaitSet.
// We use double-checked locking to avoid grabbing _WaitSetLock
// if the thread is not on the wait queue.
//
// Note that we don't need a fence before the fetch of TState.
// In the worst case we'll fetch an old, stale value of TS_WAIT previously
// written by this thread. (perhaps the fetch might even be satisfied
// by a look-aside into the processor's own store buffer, although given
// the length of the code path between the prior ST and this load that's
// highly unlikely). If the following LD fetches a stale TS_WAIT value
// then we'll acquire the lock and then re-fetch a fresh TState value.
// That is, we fail toward safety.

if (node.TState == ObjectWaiter::TS_WAIT) {
Thread::SpinAcquire(&_WaitSetLock, "WaitSet - unlink");
if (node.TState == ObjectWaiter::TS_WAIT) {
DequeueSpecificWaiter(&node); // unlink from WaitSet
assert(node._notified == 0, "invariant");
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_WaitSetLock);
}

// The thread is now either on off-list (TS_RUN),
// on the EntryList (TS_ENTER), or on the cxq (TS_CXQ).
// The Node's TState variable is stable from the perspective of this thread.
// No other threads will asynchronously modify TState.
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
OrderAccess::loadload();
if (_succ == current) _succ = NULL;
WasNotified = node._notified;

// Reentry phase -- reacquire the monitor.
// re-enter contended monitor after object.wait().
// retain OBJECT_WAIT state until re-enter successfully completes
// Thread state is thread_in_vm and oop access is again safe,
// although the raw address of the object may have changed.
// (Don't cache naked oops over safepoints, of course).

// post monitor waited event. Note that this is past-tense, we are done waiting.
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(current, this, ret == OS_TIMEOUT);

if (node._notified != 0 && _succ == current) {
// In this part of the monitor wait-notify-reenter protocol it
// is possible (and normal) for another thread to do a fastpath
// monitor enter-exit while this thread is still trying to get
// to the reenter portion of the protocol.
//
// The ObjectMonitor was notified and the current thread is
// the successor which also means that an unpark() has already
// been done. The JVMTI_EVENT_MONITOR_WAITED event handler can
// consume the unpark() that was done when the successor was
// set because the same ParkEvent is shared between Java
// monitors and JVM/TI RawMonitors (for now).
//
// We redo the unpark() to ensure forward progress, i.e., we
// don't want all pending threads hanging (parked) with none
// entering the unlocked monitor.
node._event->unpark();
}
}

if (event.should_commit()) {
post_monitor_wait_event(&event, this, node._notifier_tid, millis, ret == OS_TIMEOUT);
}

OrderAccess::fence();

assert(current->_Stalled != 0, "invariant");
current->_Stalled = 0;

assert(owner_raw() != current, "invariant");
ObjectWaiter::TStates v = node.TState;
if (v == ObjectWaiter::TS_RUN) {
enter(current);
} else {
guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
ReenterI(current, &node);
node.wait_reenter_end(this);
}

// current has reacquired the lock.
// Lifecycle - the node representing current must not appear on any queues.
// Node is about to go out-of-scope, but even if it were immortal we wouldn't
// want residual elements associated with this thread left on any lists.
guarantee(node.TState == ObjectWaiter::TS_RUN, "invariant");
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
} // OSThreadWaitState()

current->set_current_waiting_monitor(NULL);

guarantee(_recursions == 0, "invariant");
_recursions = save // restore the old recursion count
+ JvmtiDeferredUpdates::get_and_reset_relock_count_after_wait(current); // increased by the deferred relock count
_waiters--; // decrement the number of waiters

// Verify a few postconditions
assert(owner_raw() == current, "invariant");
assert(_succ != current, "invariant");
assert(object()->mark() == markWord::encode(this), "invariant");

// check if the notification happened
if (!WasNotified) {
// no, it could be timeout or Thread.interrupt() or both
// check for interrupt event, otherwise it is timeout
if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
THROW(vmSymbols::java_lang_InterruptedException());
}
}

// NOTE: Spurious wake up will be consider as timeout.
// Monitor notify has precedence over thread interrupt.
}
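
For context, here is a minimal Java snippet (my own illustration, not from the post) that drives this native path: Object.wait() on a synchronized object ends up in JVM_MonitorWait and then ObjectMonitor::wait(), which is exactly where the gdb breakpoints below are hit.

public class WaitNotifyDemo {
    private static final Object lock = new Object();

    public static void main(String[] args) throws InterruptedException {
        Thread waiter = new Thread(() -> {
            synchronized (lock) {
                try {
                    // Enters JVM_MonitorWait -> ObjectMonitor::wait -> PlatformEvent::park -> pthread_cond_wait
                    lock.wait();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }, "Thread-0");
        waiter.start();

        Thread.sleep(1000); // crude way to make sure Thread-0 is already waiting
        synchronized (lock) {
            // Wakes the waiter; the unpark side ends up in pthread_cond_signal
            lock.notify();
        }
        waiter.join();
    }
}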

wait:

Thread 20 "Thread-0" hit Breakpoint 2, __pthread_cond_wait (cond=0x7ffff0510058, mutex=0x7ffff0510030) at forward.c:121
121 forward.c: No such file or directory.
(gdb) bt
#0 __pthread_cond_wait (cond=0x7ffff0510058, mutex=0x7ffff0510030) at forward.c:121
#1 0x00007ffff6c21713 in os::PlatformEvent::park (this=0x7ffff0510000) at /home/ubuntu/daixiao/jdk/src/hotspot/os/posix/os_posix.cpp:1484
#2 0x00007ffff6bd003c in ObjectMonitor::wait (this=0x7fffac0013b0, millis=0, interruptible=true, __the_thread__=0x7ffff050f5b0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1544
#3 0x00007ffff6e90188 in ObjectSynchronizer::wait (obj=..., millis=0, __the_thread__=0x7ffff050f5b0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/synchronizer.cpp:654
#4 0x00007ffff68298ae in JVM_MonitorWait (env=0x7ffff050f8a8, handle=0x7fffd0df77c0, ms=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:617
#5 0x00007fffe100f68b in ?? ()
#6 0x00000008f7c32db8 in ?? ()
#7 0x00007ffff050f5b0 in ?? ()
#8 0x00007fffd0df7760 in ?? ()
#9 0x00007fffd0df7748 in ?? ()
#10 0x0000000000000000 in ?? ()

notify:

(gdb) bt
#0 __pthread_cond_signal (cond=0x7ffff04f0958) at forward.c:110
#1 0x00007ffff6c21c13 in os::PlatformEvent::unpark (this=0x7ffff04f0900) at /home/ubuntu/daixiao/jdk/src/hotspot/os/posix/os_posix.cpp:1590
#2 0x00007ffff6bcf654 in ObjectMonitor::ExitEpilog (this=0x7fffac0010b0, current=0x7ffff04ef410, Wakee=0x0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1350
#3 0x00007ffff6bcf57b in ObjectMonitor::exit (this=0x7fffac0010b0, current=0x7ffff04ef410, not_suspended=true) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1321
#4 0x00007ffff6bcfe8e in ObjectMonitor::wait (this=0x7fffac0010b0, millis=0, interruptible=true, __the_thread__=0x7ffff04ef410) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/objectMonitor.cpp:1515
#5 0x00007ffff6e90188 in ObjectSynchronizer::wait (obj=..., millis=0, __the_thread__=0x7ffff04ef410) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/synchronizer.cpp:654
#6 0x00007ffff68298ae in JVM_MonitorWait (env=0x7ffff04ef708, handle=0x7fffd0df77c0, ms=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:617
#7 0x00007fffe100f68b in ?? ()
#8 0x00000008f7c32db8 in ?? ()
#9 0x00007ffff04ef410 in ?? ()
#10 0x00007fffd0df7760 in ?? ()
#11 0x00007fffd0df7748 in ?? ()
#12 0x0000000000000000 in ?? ()

The underlying PlatformEvent::park on POSIX (os_posix.cpp), which is where the pthread_cond_wait in the backtrace comes from:

void PlatformEvent::park() {       // AKA "down()"
// Transitions for _event:
// -1 => -1 : illegal
// 1 => 0 : pass - return immediately
// 0 => -1 : block; then set _event to 0 before returning

// Invariant: Only the thread associated with the PlatformEvent
// may call park().
assert(_nParked == 0, "invariant");

int v;

// atomically decrement _event
for (;;) {
v = _event;
if (Atomic::cmpxchg(&_event, v, v - 1) == v) break;
}
guarantee(v >= 0, "invariant");

if (v == 0) { // Do this the hard way by blocking ...
int status = pthread_mutex_lock(_mutex);
assert_status(status == 0, status, "mutex_lock");
guarantee(_nParked == 0, "invariant");
++_nParked;
while (_event < 0) {
// OS-level "spurious wakeups" are ignored
status = pthread_cond_wait(_cond, _mutex);
assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
status, "cond_wait");
}
--_nParked;

_event = 0;
status = pthread_mutex_unlock(_mutex);
assert_status(status == 0, status, "mutex_unlock");
// Paranoia to ensure our locked and lock-free paths interact
// correctly with each other.
OrderAccess::fence();
}
guarantee(_event >= 0, "invariant");
}

demo: a minimal pthread producer/consumer using the same mutex + condition-variable pattern (compile with gcc -pthread):

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int condition = 0;
int count = 0;
pthread_t thread_id;

int consume(void)
{
    while (1) {
        pthread_mutex_lock(&mutex);
        while (condition == 0)
            pthread_cond_wait(&cond, &mutex);   /* blocks, just like PlatformEvent::park() */
        printf("Consumed %d\n", count);
        condition = 0;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&mutex);
    }

    return 0;
}

void *produce(void *arg)
{
    while (1) {
        pthread_mutex_lock(&mutex);
        while (condition == 1)
            pthread_cond_wait(&cond, &mutex);
        printf("Produced %d\n", count++);
        condition = 1;
        pthread_cond_signal(&cond);             /* wakes the consumer, like unpark() */
        pthread_mutex_unlock(&mutex);
    }
    return NULL;
}

int main(void)
{
    /* pthread_create takes a pthread_t*, so pass &thread_id */
    pthread_create(&thread_id, NULL, &produce, NULL);
    return consume();
}

Related reading

Background

FST stands for finite state transducer (often loosely described as a finite state machine). Lucene compresses and stores a lot of its data in this format.

FST example

Before introducing the FST, first look at HashMap.

The semantics of a HashMap are key -> value: you give it a key and it returns a value.

An FST is also a special kind of map with essentially the same semantics: FST(key) = value.

Example

Take the following array of terms: [cat:5, dog:7, dogs:13]

  • key cat, value 5
  • key dog, value 7
  • key dogs, value 13
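
Before decoding the serialized bytes, it may help to see roughly how such an FST is built and queried through Lucene's FST API. This is a sketch modelled on the example in the FST package javadoc of pre-9 Lucene (where the builder class is org.apache.lucene.util.fst.Builder; Lucene 9, which appears later in this post, renames it to FSTCompiler), not code taken from the post:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.Builder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class FstDemo {
    public static void main(String[] args) throws Exception {
        String[] keys = {"cat", "dog", "dogs"};   // must be added in sorted order
        long[] values = {5, 7, 13};

        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        Builder<Long> builder = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
        IntsRefBuilder scratch = new IntsRefBuilder();
        for (int i = 0; i < keys.length; i++) {
            builder.add(Util.toIntsRef(new BytesRef(keys[i]), scratch), values[i]);
        }
        FST<Long> fst = builder.finish();

        // FST(key) = value, just like a map lookup
        System.out.println(Util.get(fst, new BytesRef("dog"))); // 7
    }
}

Keys have to be added in sorted order, which is what allows the FST to share both prefixes and suffixes between entries.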

In the end this is serialized into the following byte array:

[0, 116, 15, 97, 6, 5, 115, 31, 103, 7, 111, 6, 7, 100, 22, 4, 5, 99, 16]

Now let's walk through the bytes of this example, decoding from the end of the array towards the front:

Bytes         | Decoded arc
103, 7        | flag=7, value='g' (103), target=7, nextArc=7
111, 6        | flag=6, value='o' (111), target=9, nextArc=9
7, 100, 22    | flag=22, value='d' (100), output=7, target=11 (why 11? because the position just before the 7 is pos=11), nextArc=11
4, 5, 99, 16  | flag=16, value='c' (99), output=5, target=4, nextArc=14

[0, 116, 15,| 97, 6,  |5, 115, 31, |103, 7,  |111, 6,  |7, 100, 22,| 4, 5, 99, 16]
--------| ------- | -----------| --------| ------- |----------- | -------------
(t,null)| (a,null)| (s,5)      | (g,null)| (o,null)| (d,7)       | (c, output=5, target=4, flag=16)

Constants used above:

Constant           | Value  | Description
BIT_LAST_ARC       | 1 << 1 | this arc is the last outgoing arc of its node (similar to the right child of a binary tree, or the third child of a ternary tree)
BIT_ARC_HAS_OUTPUT | 1 << 4 | the arc has an output, i.e. it stores part of a value
BIT_TARGET_NEXT    | 1 << 2 | the target node is stored immediately after this arc, so no separate target pointer is needed; the arc and its target node are laid out together
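
As a worked example (my own check against the values above; worth verifying against your exact Lucene version), the flag byte 22 stored for the 'd' arc decodes into exactly the bits the table describes:

public class FstFlagDecode {
    // Flag bits as defined in org.apache.lucene.util.fst.FST
    static final int BIT_FINAL_ARC      = 1;        // 1 << 0
    static final int BIT_LAST_ARC       = 1 << 1;
    static final int BIT_TARGET_NEXT    = 1 << 2;
    static final int BIT_STOP_NODE      = 1 << 3;
    static final int BIT_ARC_HAS_OUTPUT = 1 << 4;

    public static void main(String[] args) {
        int flag = 22; // the flag byte stored for the 'd' arc in the example
        // 22 = 16 + 4 + 2 = BIT_ARC_HAS_OUTPUT | BIT_TARGET_NEXT | BIT_LAST_ARC:
        // the arc carries an output (7), its target node is laid out right after it,
        // and it is the last outgoing arc of its node.
        System.out.println((flag & BIT_ARC_HAS_OUTPUT) != 0); // true
        System.out.println((flag & BIT_TARGET_NEXT) != 0);    // true
        System.out.println((flag & BIT_LAST_ARC) != 0);       // true
        System.out.println((flag & BIT_FINAL_ARC) != 0);      // false: "d" on its own is not a key
    }
}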

Analysis of the Arc class

The Arc class source is as follows:
  public static final class Arc<T> {

// *** Arc fields.

private int label;

private T output;

private long target;

private byte flags;

private T nextFinalOutput;

private long nextArc;

private byte nodeFlags;

// *** Fields for arcs belonging to a node with fixed length arcs.
// So only valid when bytesPerArc != 0.
// nodeFlags == ARCS_FOR_BINARY_SEARCH || nodeFlags == ARCS_FOR_DIRECT_ADDRESSING.

private int bytesPerArc;

private long posArcsStart;

private int arcIdx;

private int numArcs;

// *** Fields for a direct addressing node. nodeFlags == ARCS_FOR_DIRECT_ADDRESSING.

/**
* Start position in the {@link FST.BytesReader} of the presence bits for a direct addressing
* node, aka the bit-table
*/
private long bitTableStart;

/** First label of a direct addressing node. */
private int firstLabel;

/**
* Index of the current label of a direct addressing node. While {@link #arcIdx} is the current
* index in the label range, {@link #presenceIndex} is its corresponding index in the list of
* actually present labels. It is equal to the number of bits set before the bit at {@link
* #arcIdx} in the bit-table. This field is a cache to avoid to count bits set repeatedly when
* iterating the next arcs.
*/
private int presenceIndex;
}
Field           | Description
label           | in key/value terms, a label is one piece of the key; several labels together form a key, e.g. "cat" is split into three labels: 'c', 'a', 't'
output          | in key/value terms, an output is one piece of the value; several outputs combined form the value
target          | the offset of the next node; for an arc src -> dst, target is the position of dst, i.e. arr[target] is where the dst node lives
flags           | assorted status bits describing the state of this arc, packed into a bitmap
nextFinalOutput | as above: the final piece of the value for this key, or null if there is none
nextArc         | when a node has several arcs (like sibling nodes in a multi-way tree), this is the offset of the next sibling arc
numArcs         | how many arcs this node has, i.e. how many children the node has

Write path (stack trace when the FST is built while writing the terms index):
add:473, FSTCompiler (org.apache.lucene.util.fst)
compileIndex:504, Lucene90BlockTreeTermsWriter$PendingBlock (org.apache.lucene.codecs.lucene90.blocktree)
writeBlocks:725, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
finish:1105, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
write:370, Lucene90BlockTreeTermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
write:172, PerFieldPostingsFormat$FieldsWriter (org.apache.lucene.codecs.perfield)
flush:135, FreqProxTermsWriter (org.apache.lucene.index)
flush:310, IndexingChain (org.apache.lucene.index)
flush:392, DocumentsWriterPerThread (org.apache.lucene.index)
doFlush:492, DocumentsWriter (org.apache.lucene.index)
flushAllThreads:671, DocumentsWriter (org.apache.lucene.index)
doFlush:4194, IndexWriter (org.apache.lucene.index)
flush:4168, IndexWriter (org.apache.lucene.index)
shutdown:1322, IndexWriter (org.apache.lucene.index)
close:1362, IndexWriter (org.apache.lucene.index)
doTestSearch:133, FstTest (com.dinosaur.lucene.demo)

Read path (stack trace when the FST is queried during a search):
findTargetArc:1418, FST (org.apache.lucene.util.fst)
seekExact:511, SegmentTermsEnum (org.apache.lucene.codecs.lucene90.blocktree)
loadTermsEnum:111, TermStates (org.apache.lucene.index)
build:96, TermStates (org.apache.lucene.index)
createWeight:227, TermQuery (org.apache.lucene.search)
createWeight:904, IndexSearcher (org.apache.lucene.search)
search:687, IndexSearcher (org.apache.lucene.search)
searchAfter:523, IndexSearcher (org.apache.lucene.search)
search:538, IndexSearcher (org.apache.lucene.search)
doPagingSearch:158, SearchFiles (com.dinosaur.lucene.demo)
testSearch:128, SearchFiles (com.dinosaur.lucene.demo)


Jumping to the term's metadata

How the jump works:
public void decodeMetaData() throws IOException {

// if (DEBUG) System.out.println("\nBTTR.decodeMetadata seg=" + segment + " mdUpto=" +
// metaDataUpto + " vs termBlockOrd=" + state.termBlockOrd);

// lazily catch up on metadata decode:
final int limit = getTermBlockOrd();
boolean absolute = metaDataUpto == 0;
assert limit > 0;

// TODO: better API would be "jump straight to term=N"???
while (metaDataUpto < limit) {

// TODO: we could make "tiers" of metadata, ie,
// decode docFreq/totalTF but don't decode postings
// metadata; this way caller could get
// docFreq/totalTF w/o paying decode cost for
// postings

// TODO: if docFreq were bulk decoded we could
// just skipN here:
if (statsSingletonRunLength > 0) {
state.docFreq = 1;
state.totalTermFreq = 1;
statsSingletonRunLength--;
} else {
int token = statsReader.readVInt();
if ((token & 1) == 1) {
state.docFreq = 1;
state.totalTermFreq = 1;
statsSingletonRunLength = token >>> 1;
} else {
state.docFreq = token >>> 1;
if (ste.fr.fieldInfo.getIndexOptions() == IndexOptions.DOCS) {
state.totalTermFreq = state.docFreq;
} else {
state.totalTermFreq = state.docFreq + statsReader.readVLong();
}
}
}

// metadata
ste.fr.parent.postingsReader.decodeTerm(bytesReader, ste.fr.fieldInfo, state, absolute);

metaDataUpto++;
absolute = false;
}
state.termBlockOrd = metaDataUpto;
}

Related reading


Background

Search usually needs ranking: the matching documents have to be ordered somehow, and there are many scoring models for this. One of them is BM25.

In Lucene the implementation class is
lucene/core/src/java/org/apache/lucene/search/similarities/BM25Similarity.java
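
For orientation, the classic BM25 formula looks roughly like the sketch below. This is my own illustration, not Lucene's code; BM25Similarity differs in implementation details (for example how document length norms are encoded), and it drops the constant (k1 + 1) factor of the textbook formula since that does not change the ranking, but the IDF form shown here is the one its javadoc documents:

public class Bm25Sketch {
    // Typical defaults, the same defaults BM25Similarity uses
    static final double K1 = 1.2;
    static final double B  = 0.75;

    /**
     * Score of one query term against one document.
     * tf        = term frequency of the term in the document
     * docFreq   = number of documents containing the term
     * docCount  = total number of documents
     * docLen    = length of this document (in terms)
     * avgDocLen = average document length in the collection
     */
    static double score(double tf, long docFreq, long docCount, double docLen, double avgDocLen) {
        double idf = Math.log(1 + (docCount - docFreq + 0.5) / (docFreq + 0.5));
        double norm = K1 * (1 - B + B * docLen / avgDocLen);
        return idf * tf / (tf + norm);
    }

    public static void main(String[] args) {
        // A term that appears twice in a 100-term document and in 10 of 1000 documents
        System.out.println(score(2, 10, 1000, 100, 120));
    }
}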

Related reading

SimpleDateFormat is not thread-safe because the formatter holds a mutable object internally (a Calendar), and that object gets modified by every thread that calls it.

DateTimeFormatter is thread-safe because it is immutable; an immutable value can be shared across threads without any thread-safety problems.
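
A minimal illustration (my own example, not from the post): a shared SimpleDateFormat can produce corrupted results under concurrency, while a shared DateTimeFormatter is safe.

import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Date;

public class FormatterThreadSafety {
    // Broken when shared: every call mutates the formatter's internal Calendar
    static final SimpleDateFormat SDF = new SimpleDateFormat("yyyy-MM-dd");

    // Safe to share: immutable, so concurrent use cannot corrupt any state
    static final DateTimeFormatter DTF = DateTimeFormatter.ofPattern("yyyy-MM-dd");

    public static void main(String[] args) {
        for (int i = 0; i < 10; i++) {
            new Thread(() -> {
                // May print wrong output under contention (parse() can even throw)
                System.out.println(SDF.format(new Date()));
                // Always correct
                System.out.println(DTF.format(LocalDate.now()));
            }).start();
        }
    }
}

The usual fixes are to keep one SimpleDateFormat per thread (for example in a ThreadLocal) or, better, to switch to java.time.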

Related reading

Background

The stack trace (MyBatis setting a Date parameter through DateTypeHandler):
setNonNullParameter:33, DateTypeHandler (org.apache.ibatis.type)
setNonNullParameter:28, DateTypeHandler (org.apache.ibatis.type)
setParameter:73, BaseTypeHandler (org.apache.ibatis.type)
setNonNullParameter:67, UnknownTypeHandler (org.apache.ibatis.type)
setParameter:73, BaseTypeHandler (org.apache.ibatis.type)
setParameters:232, MybatisParameterHandler (com.baomidou.mybatisplus.core)
parameterize:94, PreparedStatementHandler (org.apache.ibatis.executor.statement)
parameterize:64, RoutingStatementHandler (org.apache.ibatis.executor.statement)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invoke:64, Plugin (org.apache.ibatis.plugin)
parameterize:-1, $Proxy255 (com.sun.proxy)
prepareStatement:88, SimpleExecutor (org.apache.ibatis.executor)
doQuery:62, SimpleExecutor (org.apache.ibatis.executor)
queryFromDatabase:325, BaseExecutor (org.apache.ibatis.executor)
query:156, BaseExecutor (org.apache.ibatis.executor)
query:109, CachingExecutor (org.apache.ibatis.executor)
intercept:81, MybatisPlusInterceptor (com.baomidou.mybatisplus.extension.plugins)
invoke:62, Plugin (org.apache.ibatis.plugin)
query:-1, $Proxy254 (com.sun.proxy)
selectList:151, DefaultSqlSession (org.apache.ibatis.session.defaults)
selectList:145, DefaultSqlSession (org.apache.ibatis.session.defaults)
selectList:140, DefaultSqlSession (org.apache.ibatis.session.defaults)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invoke:427, SqlSessionTemplate$SqlSessionInterceptor (org.mybatis.spring)
selectList:-1, $Proxy146 (com.sun.proxy)
selectList:224, SqlSessionTemplate (org.mybatis.spring)
executeForMany:166, MybatisMapperMethod (com.baomidou.mybatisplus.core.override)
execute:77, MybatisMapperMethod (com.baomidou.mybatisplus.core.override)
invoke:148, MybatisMapperProxy$PlainMethodInvoker (com.baomidou.mybatisplus.core.override)
invoke:89, MybatisMapperProxy (com.baomidou.mybatisplus.core.override)
getUserAndSkuByDay:-1, $Proxy215 (com.sun.proxy)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
invokeJoinpointUsingReflection:344, AopUtils (org.springframework.aop.support)
invokeJoinpoint:198, ReflectiveMethodInvocation (org.springframework.aop.framework)
proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
invoke:50, DynamicDataSourceAnnotationInterceptor (com.baomidou.dynamic.datasource.aop)
proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
invoke:212, JdkDynamicAopProxy (org.springframework.aop.framework)
getUserAndSkuByDay:-1, $Proxy216 (com.sun.proxy)
testQuery:23, CdpUserBehaviorDataMapperTest (com.patpat.mms.mdp.base.core.service.mapper.cdp)
invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
invoke:566, Method (java.lang.reflect)
runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
run:12, ReflectiveCallable (org.junit.internal.runners.model)
invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
runLeaf:366, ParentRunner (org.junit.runners)
runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:331, ParentRunner$4 (org.junit.runners)
schedule:79, ParentRunner$1 (org.junit.runners)
runChildren:329, ParentRunner (org.junit.runners)
access$100:66, ParentRunner (org.junit.runners)
evaluate:293, ParentRunner$2 (org.junit.runners)
evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
evaluate:306, ParentRunner$3 (org.junit.runners)
run:413, ParentRunner (org.junit.runners)
run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
run:137, JUnitCore (org.junit.runner)
startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
prepareStreamsAndStart:232, JUnitStarter (com.intellij.rt.junit)
main:55, JUnitStarter (com.intellij.rt.junit)

Related reading


Background

I could not connect to ClickHouse Cloud from DBeaver.

In the end the answer came from the reference link below.

It turns out you have to use the HTTPS port, 8443, instead of the default 8123.

Also, the database field is mandatory; leaving it empty also makes the connection fail.
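
For reference, the working DBeaver settings correspond to a JDBC URL roughly like the one in this sketch (host and password are placeholders; property names may vary slightly with the driver version):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ClickHouseCloudConnect {
    public static void main(String[] args) throws Exception {
        // Placeholder host/credentials; note port 8443, ssl=true, and an explicit database
        String url = "jdbc:clickhouse://your-instance.clickhouse.cloud:8443/default?ssl=true";
        try (Connection conn = DriverManager.getConnection(url, "default", "your-password");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT 1")) {
            rs.next();
            System.out.println(rs.getInt(1));
        }
    }
}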


Background

1. While troubleshooting an OOM once, I found that the configured dump directory did not exist, and in that case no hprof dump file is produced after the OOM.
2. Even when the OutOfMemoryError is caught by a try/catch, the hprof dump is still produced. So the dump is not written when the JVM exits; it is written at the moment the error is raised.

Code

java -Xms50m -Xmx50m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/admin/logs/jvmlogs/java.hprof Main.java

import java.util.ArrayList;
import java.util.List;

public class Main {
    public static void main(String[] args) {
        List<Object> tem = new ArrayList<>();

        boolean test = true;
        try {
            while (test) {
                tem.add(new int[10000000]);
            }
        } catch (Throwable table) {
            System.out.println("oom test");
        }
        System.out.println("afasdfadsf");
    }
}

Stack trace (gdb breakpoint when the heap dump is written):
Thread 2 "java" hit Breakpoint 1, HeapDumper::dump (this=this@entry=0x7ffff7bfe090, path=path@entry=0x7ffff0803c20 "/home/ubuntu/fasdfd/fadf", out=0x7ffff0000b80, compression=0, overwrite=overwrite@entry=false, num_dump_threads=num_dump_threads@entry=1) at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2383
2383 int HeapDumper::dump(const char* path, outputStream* out, int compression, bool overwrite, uint num_dump_threads) {
(gdb) bt
#0 HeapDumper::dump (this=this@entry=0x7ffff7bfe090, path=path@entry=0x7ffff0803c20 "/home/ubuntu/fasdfd/fadf", out=0x7ffff0000b80, compression=0, overwrite=overwrite@entry=false, num_dump_threads=num_dump_threads@entry=1)
at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2383
#1 0x00007ffff65473a8 in HeapDumper::dump_heap (oome=oome@entry=true) at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2573
#2 0x00007ffff654750e in HeapDumper::dump_heap_from_oome () at /home/ubuntu/jdk/src/hotspot/share/services/heapDumper.cpp:2487
#3 0x00007ffff61e9c78 in report_java_out_of_memory (message=message@entry=0x7ffff75a0d5e "Java heap space") at /home/ubuntu/jdk/src/hotspot/share/utilities/debug.cpp:356
#4 0x00007ffff6c3760d in MemAllocator::Allocation::check_out_of_memory (this=this@entry=0x7ffff7bfe1b0) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:126
#5 0x00007ffff6c3aac6 in MemAllocator::Allocation::~Allocation (this=0x7ffff7bfe1b0, __in_chrg=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:83
#6 MemAllocator::allocate (this=this@entry=0x7ffff7bfe280) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:375
#7 0x00007ffff72658e7 in CollectedHeap::array_allocate (__the_thread__=0x7ffff0029850, do_zero=true, length=10000000, size=<optimized out>, klass=0x100040d90, this=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/gc/shared/collectedHeap.inline.hpp:41
#8 TypeArrayKlass::allocate_common (this=this@entry=0x100040d90, length=length@entry=10000000, do_zero=do_zero@entry=true, __the_thread__=__the_thread__@entry=0x7ffff0029850) at /home/ubuntu/jdk/src/hotspot/share/oops/typeArrayKlass.cpp:93
#9 0x00007ffff6d861d9 in TypeArrayKlass::allocate (__the_thread__=0x7ffff0029850, length=10000000, this=<optimized out>) at /home/ubuntu/jdk/src/hotspot/share/oops/typeArrayKlass.hpp:68
#10 oopFactory::new_typeArray (type=type@entry=T_INT, length=length@entry=10000000, __the_thread__=__the_thread__@entry=0x7ffff0029850) at /home/ubuntu/jdk/src/hotspot/share/memory/oopFactory.cpp:93
#11 0x00007ffff662b51a in InterpreterRuntime::newarray (current=0x7ffff0029850, type=T_INT, size=10000000) at /home/ubuntu/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:248
#12 0x00007fffe856be1a in ?? ()
#13 0x00007fffe856bd91 in ?? ()
#14 0x00000000fcf98230 in ?? ()
#15 0x00007ffff7bfe3e0 in ?? ()
#16 0x00007fffc9014349 in ?? ()
#17 0x00007ffff7bfe450 in ?? ()
#18 0x00007fffc9014408 in ?? ()
#19 0x0000000000000000 in ?? ()

The core function is report_java_out_of_memory:
void report_java_out_of_memory(const char* message) {
  static int out_of_memory_reported = 0;

  if (Atomic::cmpxchg(&out_of_memory_reported, 0, 1) == 0) {
    // create heap dump before OnOutOfMemoryError commands are executed
    if (HeapDumpOnOutOfMemoryError) {
      tty->print_cr("java.lang.OutOfMemoryError: %s", message);
      HeapDumper::dump_heap_from_oome(); // writes the hprof file
    }

    if (OnOutOfMemoryError && OnOutOfMemoryError[0]) {
      VMError::report_java_out_of_memory(message); // produces the error report for -XX:OnOutOfMemoryError
    }

    if (CrashOnOutOfMemoryError) {
      tty->print_cr("Aborting due to java.lang.OutOfMemoryError: %s", message);
      report_fatal(OOM_JAVA_HEAP_FATAL, __FILE__, __LINE__, "OutOfMemory encountered: %s", message); // this is what aborts the VM
    }

    if (ExitOnOutOfMemoryError) {
      tty->print_cr("Terminating due to java.lang.OutOfMemoryError: %s", message);
      os::_exit(3); // quick exit with no cleanup hooks run
    }
  }
}

How the dump file is opened:

char const* FileWriter::open_writer() {
assert(_fd < 0, "Must not already be open");

_fd = os::create_binary_file(_path, _overwrite);

if (_fd < 0) {
return os::strerror(errno);
}

return NULL;
}

Ultimately this calls the Linux library function open64:
// jdk/src/hotspot/os/linux/os_linux.cpp
// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
int oflags = O_WRONLY | O_CREAT;
oflags |= rewrite_existing ? O_TRUNC : O_EXCL;
return ::open64(path, oflags, S_IREAD | S_IWRITE);
}

The dump directory must already exist: the JVM never checks for it or creates it, so if it is missing open64 simply fails and no hprof file is written.

When the hprof file and the exception are generated

bool MemAllocator::Allocation::check_out_of_memory() {
  JavaThread* THREAD = _thread; // For exception macros.
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != NULL) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!_thread->in_retryable_allocation()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message); // writes the hprof file; this is the function shown above

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }
    oop exception = _overhead_limit_exceeded ?
      Universe::out_of_memory_error_gc_overhead_limit() : // OOM caused by exceeding the GC overhead limit
      Universe::out_of_memory_error_java_heap();          // the usual "Java heap space" OOM
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_retry(), true);
  }
}

So the dump file is written first, and only then is the OutOfMemoryError thrown.

Related reading
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/js/algolia-search.js b/js/algolia-search.js new file mode 100644 index 0000000000..01a5f0b099 --- /dev/null +++ b/js/algolia-search.js @@ -0,0 +1,124 @@ +/* global instantsearch, algoliasearch, CONFIG */ + +document.addEventListener('DOMContentLoaded', () => { + const algoliaSettings = CONFIG.algolia; + const { indexName, appID, apiKey } = algoliaSettings; + + let search = instantsearch({ + indexName, + searchClient : algoliasearch(appID, apiKey), + searchFunction: helper => { + let searchInput = document.querySelector('.search-input'); + if (searchInput.value) { + helper.search(); + } + } + }); + + window.pjax && search.on('render', () => { + window.pjax.refresh(document.getElementById('algolia-hits')); + }); + + // Registering Widgets + search.addWidgets([ + instantsearch.widgets.configure({ + hitsPerPage: algoliaSettings.hits.per_page || 10 + }), + + instantsearch.widgets.searchBox({ + container : '.search-input-container', + placeholder : algoliaSettings.labels.input_placeholder, + // Hide default icons of algolia search + showReset : false, + showSubmit : false, + showLoadingIndicator: false, + cssClasses : { + input: 'search-input' + } + }), + + instantsearch.widgets.stats({ + container: '#algolia-stats', + templates: { + text: data => { + let stats = algoliaSettings.labels.hits_stats + .replace(/\$\{hits}/, data.nbHits) + .replace(/\$\{time}/, data.processingTimeMS); + return `${stats} + + Algolia + +
`; + } + } + }), + + instantsearch.widgets.hits({ + container: '#algolia-hits', + templates: { + item: data => { + let link = data.permalink ? data.permalink : CONFIG.root + data.path; + return `${data._highlightResult.title.value}`; + }, + empty: data => { + return `
+ ${algoliaSettings.labels.hits_empty.replace(/\$\{query}/, data.query)} +
`; + } + }, + cssClasses: { + item: 'algolia-hit-item' + } + }), + + instantsearch.widgets.pagination({ + container: '#algolia-pagination', + scrollTo : false, + showFirst: false, + showLast : false, + templates: { + first : '', + last : '', + previous: '', + next : '' + }, + cssClasses: { + root : 'pagination', + item : 'pagination-item', + link : 'page-number', + selectedItem: 'current', + disabledItem: 'disabled-item' + } + }) + ]); + + search.start(); + + // Handle and trigger popup window + document.querySelectorAll('.popup-trigger').forEach(element => { + element.addEventListener('click', () => { + document.body.style.overflow = 'hidden'; + document.querySelector('.search-pop-overlay').classList.add('search-active'); + document.querySelector('.search-input').focus(); + }); + }); + + // Monitor main search box + const onPopupClose = () => { + document.body.style.overflow = ''; + document.querySelector('.search-pop-overlay').classList.remove('search-active'); + }; + + document.querySelector('.search-pop-overlay').addEventListener('click', event => { + if (event.target === document.querySelector('.search-pop-overlay')) { + onPopupClose(); + } + }); + document.querySelector('.popup-btn-close').addEventListener('click', onPopupClose); + window.addEventListener('pjax:success', onPopupClose); + window.addEventListener('keyup', event => { + if (event.key === 'Escape') { + onPopupClose(); + } + }); +}); diff --git a/js/bookmark.js b/js/bookmark.js new file mode 100644 index 0000000000..7c2438e120 --- /dev/null +++ b/js/bookmark.js @@ -0,0 +1,56 @@ +/* global CONFIG */ + +document.addEventListener('DOMContentLoaded', () => { + 'use strict'; + + var doSaveScroll = () => { + localStorage.setItem('bookmark' + location.pathname, window.scrollY); + }; + + var scrollToMark = () => { + var top = localStorage.getItem('bookmark' + location.pathname); + top = parseInt(top, 10); + // If the page opens with a specific hash, just jump out + if (!isNaN(top) && location.hash === '') { + // Auto scroll to the position + window.anime({ + targets : document.scrollingElement, + duration : 200, + easing : 'linear', + scrollTop: top + }); + } + }; + // Register everything + var init = function(trigger) { + // Create a link element + var link = document.querySelector('.book-mark-link'); + // Scroll event + window.addEventListener('scroll', () => link.classList.toggle('book-mark-link-fixed', window.scrollY === 0)); + // Register beforeunload event when the trigger is auto + if (trigger === 'auto') { + // Register beforeunload event + window.addEventListener('beforeunload', doSaveScroll); + window.addEventListener('pjax:send', doSaveScroll); + } + // Save the position by clicking the icon + link.addEventListener('click', () => { + doSaveScroll(); + window.anime({ + targets : link, + duration: 200, + easing : 'linear', + top : -30, + complete: () => { + setTimeout(() => { + link.style.top = ''; + }, 400); + } + }); + }); + scrollToMark(); + window.addEventListener('pjax:success', scrollToMark); + }; + + init(CONFIG.bookmark.save); +}); diff --git a/js/local-search.js b/js/local-search.js new file mode 100644 index 0000000000..31f945fd7a --- /dev/null +++ b/js/local-search.js @@ -0,0 +1,278 @@ +/* global CONFIG */ + +document.addEventListener('DOMContentLoaded', () => { + // Popup Window + let isfetched = false; + let datas; + let isXml = true; + // Search DB path + let searchPath = CONFIG.path; + if (searchPath.length === 0) { + searchPath = 'search.xml'; + } else if (searchPath.endsWith('json')) { + isXml = false; + 
} + const input = document.querySelector('.search-input'); + const resultContent = document.getElementById('search-result'); + + const getIndexByWord = (word, text, caseSensitive) => { + if (CONFIG.localsearch.unescape) { + let div = document.createElement('div'); + div.innerText = word; + word = div.innerHTML; + } + let wordLen = word.length; + if (wordLen === 0) return []; + let startPosition = 0; + let position = []; + let index = []; + if (!caseSensitive) { + text = text.toLowerCase(); + word = word.toLowerCase(); + } + while ((position = text.indexOf(word, startPosition)) > -1) { + index.push({ position, word }); + startPosition = position + wordLen; + } + return index; + }; + + // Merge hits into slices + const mergeIntoSlice = (start, end, index, searchText) => { + let item = index[index.length - 1]; + let { position, word } = item; + let hits = []; + let searchTextCountInSlice = 0; + while (position + word.length <= end && index.length !== 0) { + if (word === searchText) { + searchTextCountInSlice++; + } + hits.push({ + position, + length: word.length + }); + let wordEnd = position + word.length; + + // Move to next position of hit + index.pop(); + while (index.length !== 0) { + item = index[index.length - 1]; + position = item.position; + word = item.word; + if (wordEnd > position) { + index.pop(); + } else { + break; + } + } + } + return { + hits, + start, + end, + searchTextCount: searchTextCountInSlice + }; + }; + + // Highlight title and content + const highlightKeyword = (text, slice) => { + let result = ''; + let prevEnd = slice.start; + slice.hits.forEach(hit => { + result += text.substring(prevEnd, hit.position); + let end = hit.position + hit.length; + result += `${text.substring(hit.position, end)}`; + prevEnd = end; + }); + result += text.substring(prevEnd, slice.end); + return result; + }; + + const inputEventFunction = () => { + if (!isfetched) return; + let searchText = input.value.trim().toLowerCase(); + let keywords = searchText.split(/[-\s]+/); + if (keywords.length > 1) { + keywords.push(searchText); + } + let resultItems = []; + if (searchText.length > 0) { + // Perform local searching + datas.forEach(({ title, content, url }) => { + let titleInLowerCase = title.toLowerCase(); + let contentInLowerCase = content.toLowerCase(); + let indexOfTitle = []; + let indexOfContent = []; + let searchTextCount = 0; + keywords.forEach(keyword => { + indexOfTitle = indexOfTitle.concat(getIndexByWord(keyword, titleInLowerCase, false)); + indexOfContent = indexOfContent.concat(getIndexByWord(keyword, contentInLowerCase, false)); + }); + + // Show search results + if (indexOfTitle.length > 0 || indexOfContent.length > 0) { + let hitCount = indexOfTitle.length + indexOfContent.length; + // Sort index by position of keyword + [indexOfTitle, indexOfContent].forEach(index => { + index.sort((itemLeft, itemRight) => { + if (itemRight.position !== itemLeft.position) { + return itemRight.position - itemLeft.position; + } + return itemLeft.word.length - itemRight.word.length; + }); + }); + + let slicesOfTitle = []; + if (indexOfTitle.length !== 0) { + let tmp = mergeIntoSlice(0, title.length, indexOfTitle, searchText); + searchTextCount += tmp.searchTextCountInSlice; + slicesOfTitle.push(tmp); + } + + let slicesOfContent = []; + while (indexOfContent.length !== 0) { + let item = indexOfContent[indexOfContent.length - 1]; + let { position, word } = item; + // Cut out 100 characters + let start = position - 20; + let end = position + 80; + if (start < 0) { + start = 0; + } + if (end < 
position + word.length) { + end = position + word.length; + } + if (end > content.length) { + end = content.length; + } + let tmp = mergeIntoSlice(start, end, indexOfContent, searchText); + searchTextCount += tmp.searchTextCountInSlice; + slicesOfContent.push(tmp); + } + + // Sort slices in content by search text's count and hits' count + slicesOfContent.sort((sliceLeft, sliceRight) => { + if (sliceLeft.searchTextCount !== sliceRight.searchTextCount) { + return sliceRight.searchTextCount - sliceLeft.searchTextCount; + } else if (sliceLeft.hits.length !== sliceRight.hits.length) { + return sliceRight.hits.length - sliceLeft.hits.length; + } + return sliceLeft.start - sliceRight.start; + }); + + // Select top N slices in content + let upperBound = parseInt(CONFIG.localsearch.top_n_per_article, 10); + if (upperBound >= 0) { + slicesOfContent = slicesOfContent.slice(0, upperBound); + } + + let resultItem = ''; + + if (slicesOfTitle.length !== 0) { + resultItem += `
  • ${highlightKeyword(title, slicesOfTitle[0])}`; + } else { + resultItem += `
  • ${title}`; + } + + slicesOfContent.forEach(slice => { + resultItem += `

    ${highlightKeyword(content, slice)}...

    `; + }); + + resultItem += '
  • '; + resultItems.push({ + item: resultItem, + id : resultItems.length, + hitCount, + searchTextCount + }); + } + }); + } + if (keywords.length === 1 && keywords[0] === '') { + resultContent.innerHTML = '
    '; + } else if (resultItems.length === 0) { + resultContent.innerHTML = '
    '; + } else { + resultItems.sort((resultLeft, resultRight) => { + if (resultLeft.searchTextCount !== resultRight.searchTextCount) { + return resultRight.searchTextCount - resultLeft.searchTextCount; + } else if (resultLeft.hitCount !== resultRight.hitCount) { + return resultRight.hitCount - resultLeft.hitCount; + } + return resultRight.id - resultLeft.id; + }); + resultContent.innerHTML = `
      ${resultItems.map(result => result.item).join('')}
    `; + window.pjax && window.pjax.refresh(resultContent); + } + }; + + const fetchData = () => { + fetch(CONFIG.root + searchPath) + .then(response => response.text()) + .then(res => { + // Get the contents from search data + isfetched = true; + datas = isXml ? [...new DOMParser().parseFromString(res, 'text/xml').querySelectorAll('entry')].map(element => { + return { + title : element.querySelector('title').textContent, + content: element.querySelector('content').textContent, + url : element.querySelector('url').textContent + }; + }) : JSON.parse(res); + // Only match articles with not empty titles + datas = datas.filter(data => data.title).map(data => { + data.title = data.title.trim(); + data.content = data.content ? data.content.trim().replace(/<[^>]+>/g, '') : ''; + data.url = decodeURIComponent(data.url).replace(/\/{2,}/g, '/'); + return data; + }); + // Remove loading animation + document.getElementById('no-result').innerHTML = ''; + inputEventFunction(); + }); + }; + + if (CONFIG.localsearch.preload) { + fetchData(); + } + + if (CONFIG.localsearch.trigger === 'auto') { + input.addEventListener('input', inputEventFunction); + } else { + document.querySelector('.search-icon').addEventListener('click', inputEventFunction); + input.addEventListener('keypress', event => { + if (event.key === 'Enter') { + inputEventFunction(); + } + }); + } + + // Handle and trigger popup window + document.querySelectorAll('.popup-trigger').forEach(element => { + element.addEventListener('click', () => { + document.body.style.overflow = 'hidden'; + document.querySelector('.search-pop-overlay').classList.add('search-active'); + input.focus(); + if (!isfetched) fetchData(); + }); + }); + + // Monitor main search box + const onPopupClose = () => { + document.body.style.overflow = ''; + document.querySelector('.search-pop-overlay').classList.remove('search-active'); + }; + + document.querySelector('.search-pop-overlay').addEventListener('click', event => { + if (event.target === document.querySelector('.search-pop-overlay')) { + onPopupClose(); + } + }); + document.querySelector('.popup-btn-close').addEventListener('click', onPopupClose); + window.addEventListener('pjax:success', onPopupClose); + window.addEventListener('keyup', event => { + if (event.key === 'Escape') { + onPopupClose(); + } + }); +}); diff --git a/js/motion.js b/js/motion.js new file mode 100644 index 0000000000..026199aabb --- /dev/null +++ b/js/motion.js @@ -0,0 +1,177 @@ +/* global NexT, CONFIG, Velocity */ + +if (window.$ && window.$.Velocity) window.Velocity = window.$.Velocity; + +NexT.motion = {}; + +NexT.motion.integrator = { + queue : [], + cursor: -1, + init : function() { + this.queue = []; + this.cursor = -1; + return this; + }, + add: function(fn) { + this.queue.push(fn); + return this; + }, + next: function() { + this.cursor++; + var fn = this.queue[this.cursor]; + typeof fn === 'function' && fn(NexT.motion.integrator); + }, + bootstrap: function() { + this.next(); + } +}; + +NexT.motion.middleWares = { + logo: function(integrator) { + var sequence = []; + var brand = document.querySelector('.brand'); + var image = document.querySelector('.custom-logo-image'); + var title = document.querySelector('.site-title'); + var subtitle = document.querySelector('.site-subtitle'); + var logoLineTop = document.querySelector('.logo-line-before i'); + var logoLineBottom = document.querySelector('.logo-line-after i'); + + brand && sequence.push({ + e: brand, + p: {opacity: 1}, + o: {duration: 200} + }); + + function 
getMistLineSettings(element, translateX) { + return { + e: element, + p: {translateX}, + o: { + duration : 500, + sequenceQueue: false + } + }; + } + + function pushImageToSequence() { + sequence.push({ + e: image, + p: {opacity: 1, top: 0}, + o: {duration: 200} + }); + } + + CONFIG.scheme === 'Mist' && logoLineTop && logoLineBottom + && sequence.push( + getMistLineSettings(logoLineTop, '100%'), + getMistLineSettings(logoLineBottom, '-100%') + ); + + CONFIG.scheme === 'Muse' && image && pushImageToSequence(); + + title && sequence.push({ + e: title, + p: {opacity: 1, top: 0}, + o: {duration: 200} + }); + + subtitle && sequence.push({ + e: subtitle, + p: {opacity: 1, top: 0}, + o: {duration: 200} + }); + + (CONFIG.scheme === 'Pisces' || CONFIG.scheme === 'Gemini') && image && pushImageToSequence(); + + if (sequence.length > 0) { + sequence[sequence.length - 1].o.complete = function() { + integrator.next(); + }; + Velocity.RunSequence(sequence); + } else { + integrator.next(); + } + + if (CONFIG.motion.async) { + integrator.next(); + } + }, + + menu: function(integrator) { + Velocity(document.querySelectorAll('.menu-item'), 'transition.slideDownIn', { + display : null, + duration: 200, + complete: function() { + integrator.next(); + } + }); + + if (CONFIG.motion.async) { + integrator.next(); + } + }, + + subMenu: function(integrator) { + var subMenuItem = document.querySelectorAll('.sub-menu .menu-item'); + if (subMenuItem.length > 0) { + subMenuItem.forEach(element => { + element.style.opacity = 1; + }); + } + integrator.next(); + }, + + postList: function(integrator) { + var postBlock = document.querySelectorAll('.post-block, .pagination, .comments'); + var postBlockTransition = CONFIG.motion.transition.post_block; + var postHeader = document.querySelectorAll('.post-header'); + var postHeaderTransition = CONFIG.motion.transition.post_header; + var postBody = document.querySelectorAll('.post-body'); + var postBodyTransition = CONFIG.motion.transition.post_body; + var collHeader = document.querySelectorAll('.collection-header'); + var collHeaderTransition = CONFIG.motion.transition.coll_header; + + if (postBlock.length > 0) { + var postMotionOptions = window.postMotionOptions || { + stagger : 100, + drag : true, + complete: function() { + integrator.next(); + } + }; + + if (CONFIG.motion.transition.post_block) { + Velocity(postBlock, 'transition.' + postBlockTransition, postMotionOptions); + } + if (CONFIG.motion.transition.post_header) { + Velocity(postHeader, 'transition.' + postHeaderTransition, postMotionOptions); + } + if (CONFIG.motion.transition.post_body) { + Velocity(postBody, 'transition.' + postBodyTransition, postMotionOptions); + } + if (CONFIG.motion.transition.coll_header) { + Velocity(collHeader, 'transition.' + collHeaderTransition, postMotionOptions); + } + } + if (CONFIG.scheme === 'Pisces' || CONFIG.scheme === 'Gemini') { + integrator.next(); + } + }, + + sidebar: function(integrator) { + var sidebarAffix = document.querySelector('.sidebar-inner'); + var sidebarAffixTransition = CONFIG.motion.transition.sidebar; + // Only for Pisces | Gemini. + if (sidebarAffixTransition && (CONFIG.scheme === 'Pisces' || CONFIG.scheme === 'Gemini')) { + Velocity(sidebarAffix, 'transition.' + sidebarAffixTransition, { + display : null, + duration: 200, + complete: function() { + // After motion complete need to remove transform from sidebar to let affix work on Pisces | Gemini. 
+ sidebarAffix.style.transform = 'initial'; + } + }); + } + integrator.next(); + } +}; diff --git a/js/next-boot.js b/js/next-boot.js new file mode 100644 index 0000000000..52ec9aec0f --- /dev/null +++ b/js/next-boot.js @@ -0,0 +1,114 @@ +/* global NexT, CONFIG, Velocity */ + +NexT.boot = {}; + +NexT.boot.registerEvents = function() { + + NexT.utils.registerScrollPercent(); + NexT.utils.registerCanIUseTag(); + + // Mobile top menu bar. + document.querySelector('.site-nav-toggle .toggle').addEventListener('click', () => { + event.currentTarget.classList.toggle('toggle-close'); + var siteNav = document.querySelector('.site-nav'); + var animateAction = siteNav.classList.contains('site-nav-on') ? 'slideUp' : 'slideDown'; + + if (typeof Velocity === 'function') { + Velocity(siteNav, animateAction, { + duration: 200, + complete: function() { + siteNav.classList.toggle('site-nav-on'); + } + }); + } else { + siteNav.classList.toggle('site-nav-on'); + } + }); + + var TAB_ANIMATE_DURATION = 200; + document.querySelectorAll('.sidebar-nav li').forEach((element, index) => { + element.addEventListener('click', event => { + var item = event.currentTarget; + var activeTabClassName = 'sidebar-nav-active'; + var activePanelClassName = 'sidebar-panel-active'; + if (item.classList.contains(activeTabClassName)) return; + + var targets = document.querySelectorAll('.sidebar-panel'); + var target = targets[index]; + var currentTarget = targets[1 - index]; + window.anime({ + targets : currentTarget, + duration: TAB_ANIMATE_DURATION, + easing : 'linear', + opacity : 0, + complete: () => { + // Prevent adding TOC to Overview if Overview was selected when close & open sidebar. + currentTarget.classList.remove(activePanelClassName); + target.style.opacity = 0; + target.classList.add(activePanelClassName); + window.anime({ + targets : target, + duration: TAB_ANIMATE_DURATION, + easing : 'linear', + opacity : 1 + }); + } + }); + + [...item.parentNode.children].forEach(element => { + element.classList.remove(activeTabClassName); + }); + item.classList.add(activeTabClassName); + }); + }); + + window.addEventListener('resize', NexT.utils.initSidebarDimension); + + window.addEventListener('hashchange', () => { + var tHash = location.hash; + if (tHash !== '' && !tHash.match(/%\S{2}/)) { + var target = document.querySelector(`.tabs ul.nav-tabs li a[href="${tHash}"]`); + target && target.click(); + } + }); +}; + +NexT.boot.refresh = function() { + + /** + * Register JS handlers by condition option. + * Need to add config option in Front-End at 'layout/_partials/head.swig' file. + */ + CONFIG.fancybox && NexT.utils.wrapImageWithFancyBox(); + CONFIG.mediumzoom && window.mediumZoom('.post-body :not(a) > img, .post-body > img'); + CONFIG.lazyload && window.lozad('.post-body img').observe(); + CONFIG.pangu && window.pangu.spacingPage(); + + CONFIG.exturl && NexT.utils.registerExtURL(); + CONFIG.copycode.enable && NexT.utils.registerCopyCode(); + NexT.utils.registerTabsTag(); + NexT.utils.registerActiveMenuItem(); + NexT.utils.registerLangSelect(); + NexT.utils.registerSidebarTOC(); + NexT.utils.wrapTableWithBox(); + NexT.utils.registerVideoIframe(); +}; + +NexT.boot.motion = function() { + // Define Motion Sequence & Bootstrap Motion. 
+ if (CONFIG.motion.enable) { + NexT.motion.integrator + .add(NexT.motion.middleWares.logo) + .add(NexT.motion.middleWares.menu) + .add(NexT.motion.middleWares.postList) + .add(NexT.motion.middleWares.sidebar) + .bootstrap(); + } + NexT.utils.updateSidebarPosition(); +}; + +document.addEventListener('DOMContentLoaded', () => { + NexT.boot.registerEvents(); + NexT.boot.refresh(); + NexT.boot.motion(); +}); diff --git a/js/schemes/muse.js b/js/schemes/muse.js new file mode 100644 index 0000000000..f4be56df17 --- /dev/null +++ b/js/schemes/muse.js @@ -0,0 +1,113 @@ +/* global NexT, CONFIG, Velocity */ + +document.addEventListener('DOMContentLoaded', () => { + + var isRight = CONFIG.sidebar.position === 'right'; + var SIDEBAR_WIDTH = CONFIG.sidebar.width || 320; + var SIDEBAR_DISPLAY_DURATION = 200; + var mousePos = {}; + + var sidebarToggleLines = { + lines: document.querySelector('.sidebar-toggle'), + init : function() { + this.lines.classList.remove('toggle-arrow', 'toggle-close'); + }, + arrow: function() { + this.lines.classList.remove('toggle-close'); + this.lines.classList.add('toggle-arrow'); + }, + close: function() { + this.lines.classList.remove('toggle-arrow'); + this.lines.classList.add('toggle-close'); + } + }; + + var sidebarToggleMotion = { + sidebarEl : document.querySelector('.sidebar'), + isSidebarVisible: false, + init : function() { + sidebarToggleLines.init(); + + window.addEventListener('mousedown', this.mousedownHandler.bind(this)); + window.addEventListener('mouseup', this.mouseupHandler.bind(this)); + document.querySelector('#sidebar-dimmer').addEventListener('click', this.clickHandler.bind(this)); + document.querySelector('.sidebar-toggle').addEventListener('click', this.clickHandler.bind(this)); + document.querySelector('.sidebar-toggle').addEventListener('mouseenter', this.mouseEnterHandler.bind(this)); + document.querySelector('.sidebar-toggle').addEventListener('mouseleave', this.mouseLeaveHandler.bind(this)); + window.addEventListener('sidebar:show', this.showSidebar.bind(this)); + window.addEventListener('sidebar:hide', this.hideSidebar.bind(this)); + }, + mousedownHandler: function(event) { + mousePos.X = event.pageX; + mousePos.Y = event.pageY; + }, + mouseupHandler: function(event) { + var deltaX = event.pageX - mousePos.X; + var deltaY = event.pageY - mousePos.Y; + var clickingBlankPart = Math.sqrt((deltaX * deltaX) + (deltaY * deltaY)) < 20 && event.target.matches('.main'); + if (this.isSidebarVisible && (clickingBlankPart || event.target.matches('img.medium-zoom-image, .fancybox img'))) { + this.hideSidebar(); + } + }, + clickHandler: function() { + this.isSidebarVisible ? this.hideSidebar() : this.showSidebar(); + }, + mouseEnterHandler: function() { + if (!this.isSidebarVisible) { + sidebarToggleLines.arrow(); + } + }, + mouseLeaveHandler: function() { + if (!this.isSidebarVisible) { + sidebarToggleLines.init(); + } + }, + showSidebar: function() { + this.isSidebarVisible = true; + this.sidebarEl.classList.add('sidebar-active'); + if (typeof Velocity === 'function') { + Velocity(document.querySelectorAll('.sidebar .motion-element'), isRight ? 'transition.slideRightIn' : 'transition.slideLeftIn', { + stagger: 50, + drag : true + }); + } + + sidebarToggleLines.close(); + NexT.utils.isDesktop() && window.anime(Object.assign({ + targets : document.body, + duration: SIDEBAR_DISPLAY_DURATION, + easing : 'linear' + }, isRight ? 
{ + 'padding-right': SIDEBAR_WIDTH + } : { + 'padding-left': SIDEBAR_WIDTH + })); + }, + hideSidebar: function() { + this.isSidebarVisible = false; + this.sidebarEl.classList.remove('sidebar-active'); + + sidebarToggleLines.init(); + NexT.utils.isDesktop() && window.anime(Object.assign({ + targets : document.body, + duration: SIDEBAR_DISPLAY_DURATION, + easing : 'linear' + }, isRight ? { + 'padding-right': 0 + } : { + 'padding-left': 0 + })); + } + }; + sidebarToggleMotion.init(); + + function updateFooterPosition() { + var footer = document.querySelector('.footer'); + var containerHeight = document.querySelector('.header').offsetHeight + document.querySelector('.main').offsetHeight + footer.offsetHeight; + footer.classList.toggle('footer-fixed', containerHeight <= window.innerHeight); + } + + updateFooterPosition(); + window.addEventListener('resize', updateFooterPosition); + window.addEventListener('scroll', updateFooterPosition); +}); diff --git a/js/schemes/pisces.js b/js/schemes/pisces.js new file mode 100644 index 0000000000..41633eacbf --- /dev/null +++ b/js/schemes/pisces.js @@ -0,0 +1,86 @@ +/* global NexT, CONFIG */ + +var Affix = { + init: function(element, options) { + this.element = element; + this.offset = options || 0; + this.affixed = null; + this.unpin = null; + this.pinnedOffset = null; + this.checkPosition(); + window.addEventListener('scroll', this.checkPosition.bind(this)); + window.addEventListener('click', this.checkPositionWithEventLoop.bind(this)); + window.matchMedia('(min-width: 992px)').addListener(event => { + if (event.matches) { + this.offset = NexT.utils.getAffixParam(); + this.checkPosition(); + } + }); + }, + getState: function(scrollHeight, height, offsetTop, offsetBottom) { + let scrollTop = window.scrollY; + let targetHeight = window.innerHeight; + if (offsetTop != null && this.affixed === 'top') { + if (document.querySelector('.content-wrap').offsetHeight < offsetTop) return 'top'; + return scrollTop < offsetTop ? 'top' : false; + } + if (this.affixed === 'bottom') { + if (offsetTop != null) return this.unpin <= this.element.getBoundingClientRect().top ? false : 'bottom'; + return scrollTop + targetHeight <= scrollHeight - offsetBottom ? false : 'bottom'; + } + let initializing = this.affixed === null; + let colliderTop = initializing ? scrollTop : this.element.getBoundingClientRect().top + scrollTop; + let colliderHeight = initializing ? targetHeight : height; + if (offsetTop != null && scrollTop <= offsetTop) return 'top'; + if (offsetBottom != null && (colliderTop + colliderHeight >= scrollHeight - offsetBottom)) return 'bottom'; + return false; + }, + getPinnedOffset: function() { + if (this.pinnedOffset) return this.pinnedOffset; + this.element.classList.remove('affix-top', 'affix-bottom'); + this.element.classList.add('affix'); + return (this.pinnedOffset = this.element.getBoundingClientRect().top); + }, + checkPositionWithEventLoop() { + setTimeout(this.checkPosition.bind(this), 1); + }, + checkPosition: function() { + if (window.getComputedStyle(this.element).display === 'none') return; + let height = this.element.offsetHeight; + let { offset } = this; + let offsetTop = offset.top; + let offsetBottom = offset.bottom; + let { scrollHeight } = document.body; + let affix = this.getState(scrollHeight, height, offsetTop, offsetBottom); + if (this.affixed !== affix) { + if (this.unpin != null) this.element.style.top = ''; + let affixType = 'affix' + (affix ? '-' + affix : ''); + this.affixed = affix; + this.unpin = affix === 'bottom' ? 
this.getPinnedOffset() : null; + this.element.classList.remove('affix', 'affix-top', 'affix-bottom'); + this.element.classList.add(affixType); + } + if (affix === 'bottom') { + this.element.style.top = scrollHeight - height - offsetBottom + 'px'; + } + } +}; + +NexT.utils.getAffixParam = function() { + const sidebarOffset = CONFIG.sidebar.offset || 12; + + let headerOffset = document.querySelector('.header-inner').offsetHeight; + let footerOffset = document.querySelector('.footer').offsetHeight; + + document.querySelector('.sidebar').style.marginTop = headerOffset + sidebarOffset + 'px'; + + return { + top : headerOffset, + bottom: footerOffset + }; +}; + +document.addEventListener('DOMContentLoaded', () => { + + Affix.init(document.querySelector('.sidebar-inner'), NexT.utils.getAffixParam()); +}); diff --git a/js/utils.js b/js/utils.js new file mode 100644 index 0000000000..74a6dfd558 --- /dev/null +++ b/js/utils.js @@ -0,0 +1,415 @@ +/* global NexT, CONFIG */ + +HTMLElement.prototype.wrap = function(wrapper) { + this.parentNode.insertBefore(wrapper, this); + this.parentNode.removeChild(this); + wrapper.appendChild(this); +}; + +NexT.utils = { + + /** + * Wrap images with fancybox. + */ + wrapImageWithFancyBox: function() { + document.querySelectorAll('.post-body :not(a) > img, .post-body > img').forEach(element => { + var $image = $(element); + var imageLink = $image.attr('data-src') || $image.attr('src'); + var $imageWrapLink = $image.wrap(``).parent('a'); + if ($image.is('.post-gallery img')) { + $imageWrapLink.attr('data-fancybox', 'gallery').attr('rel', 'gallery'); + } else if ($image.is('.group-picture img')) { + $imageWrapLink.attr('data-fancybox', 'group').attr('rel', 'group'); + } else { + $imageWrapLink.attr('data-fancybox', 'default').attr('rel', 'default'); + } + + var imageTitle = $image.attr('title') || $image.attr('alt'); + if (imageTitle) { + $imageWrapLink.append(`

<p class="image-caption">${imageTitle}</p>
    `); + // Make sure img title tag will show correctly in fancybox + $imageWrapLink.attr('title', imageTitle).attr('data-caption', imageTitle); + } + }); + + $.fancybox.defaults.hash = false; + $('.fancybox').fancybox({ + loop : true, + helpers: { + overlay: { + locked: false + } + } + }); + }, + + registerExtURL: function() { + document.querySelectorAll('span.exturl').forEach(element => { + let link = document.createElement('a'); + // https://stackoverflow.com/questions/30106476/using-javascripts-atob-to-decode-base64-doesnt-properly-decode-utf-8-strings + link.href = decodeURIComponent(atob(element.dataset.url).split('').map(c => { + return '%' + ('00' + c.charCodeAt(0).toString(16)).slice(-2); + }).join('')); + link.rel = 'noopener external nofollow noreferrer'; + link.target = '_blank'; + link.className = element.className; + link.title = element.title; + link.innerHTML = element.innerHTML; + element.parentNode.replaceChild(link, element); + }); + }, + + /** + * One-click copy code support. + */ + registerCopyCode: function() { + document.querySelectorAll('figure.highlight').forEach(element => { + const box = document.createElement('div'); + element.wrap(box); + box.classList.add('highlight-container'); + box.insertAdjacentHTML('beforeend', '
    '); + var button = element.parentNode.querySelector('.copy-btn'); + button.addEventListener('click', event => { + var target = event.currentTarget; + var code = [...target.parentNode.querySelectorAll('.code .line')].map(line => line.innerText).join('\n'); + var ta = document.createElement('textarea'); + ta.style.top = window.scrollY + 'px'; // Prevent page scrolling + ta.style.position = 'absolute'; + ta.style.opacity = '0'; + ta.readOnly = true; + ta.value = code; + document.body.append(ta); + const selection = document.getSelection(); + const selected = selection.rangeCount > 0 ? selection.getRangeAt(0) : false; + ta.select(); + ta.setSelectionRange(0, code.length); + ta.readOnly = false; + var result = document.execCommand('copy'); + if (CONFIG.copycode.show_result) { + target.querySelector('i').className = result ? 'fa fa-check fa-fw' : 'fa fa-times fa-fw'; + } + ta.blur(); // For iOS + target.blur(); + if (selected) { + selection.removeAllRanges(); + selection.addRange(selected); + } + document.body.removeChild(ta); + }); + button.addEventListener('mouseleave', event => { + setTimeout(() => { + event.target.querySelector('i').className = 'fa fa-clipboard fa-fw'; + }, 300); + }); + }); + }, + + wrapTableWithBox: function() { + document.querySelectorAll('table').forEach(element => { + const box = document.createElement('div'); + box.className = 'table-container'; + element.wrap(box); + }); + }, + + registerVideoIframe: function() { + document.querySelectorAll('iframe').forEach(element => { + const supported = [ + 'www.youtube.com', + 'player.vimeo.com', + 'player.youku.com', + 'player.bilibili.com', + 'www.tudou.com' + ].some(host => element.src.includes(host)); + if (supported && !element.parentNode.matches('.video-container')) { + const box = document.createElement('div'); + box.className = 'video-container'; + element.wrap(box); + let width = Number(element.width); + let height = Number(element.height); + if (width && height) { + element.parentNode.style.paddingTop = (height / width * 100) + '%'; + } + } + }); + }, + + registerScrollPercent: function() { + var THRESHOLD = 50; + var backToTop = document.querySelector('.back-to-top'); + var readingProgressBar = document.querySelector('.reading-progress-bar'); + // For init back to top in sidebar if page was scrolled after page refresh. + window.addEventListener('scroll', () => { + if (backToTop || readingProgressBar) { + var docHeight = document.querySelector('.container').offsetHeight; + var winHeight = window.innerHeight; + var contentVisibilityHeight = docHeight > winHeight ? docHeight - winHeight : document.body.scrollHeight - winHeight; + var scrollPercent = Math.min(100 * window.scrollY / contentVisibilityHeight, 100); + if (backToTop) { + backToTop.classList.toggle('back-to-top-on', window.scrollY > THRESHOLD); + backToTop.querySelector('span').innerText = Math.round(scrollPercent) + '%'; + } + if (readingProgressBar) { + readingProgressBar.style.width = scrollPercent.toFixed(2) + '%'; + } + } + }); + + backToTop && backToTop.addEventListener('click', () => { + window.anime({ + targets : document.scrollingElement, + duration : 500, + easing : 'linear', + scrollTop: 0 + }); + }); + }, + + /** + * Tabs tag listener (without twitter bootstrap). + */ + registerTabsTag: function() { + // Binding `nav-tabs` & `tab-content` by real time permalink changing. 
+ document.querySelectorAll('.tabs ul.nav-tabs .tab').forEach(element => { + element.addEventListener('click', event => { + event.preventDefault(); + var target = event.currentTarget; + // Prevent selected tab to select again. + if (!target.classList.contains('active')) { + // Add & Remove active class on `nav-tabs` & `tab-content`. + [...target.parentNode.children].forEach(element => { + element.classList.remove('active'); + }); + target.classList.add('active'); + var tActive = document.getElementById(target.querySelector('a').getAttribute('href').replace('#', '')); + [...tActive.parentNode.children].forEach(element => { + element.classList.remove('active'); + }); + tActive.classList.add('active'); + // Trigger event + tActive.dispatchEvent(new Event('tabs:click', { + bubbles: true + })); + } + }); + }); + + window.dispatchEvent(new Event('tabs:register')); + }, + + registerCanIUseTag: function() { + // Get responsive height passed from iframe. + window.addEventListener('message', ({ data }) => { + if ((typeof data === 'string') && data.includes('ciu_embed')) { + var featureID = data.split(':')[1]; + var height = data.split(':')[2]; + document.querySelector(`iframe[data-feature=${featureID}]`).style.height = parseInt(height, 10) + 5 + 'px'; + } + }, false); + }, + + registerActiveMenuItem: function() { + document.querySelectorAll('.menu-item').forEach(element => { + var target = element.querySelector('a[href]'); + if (!target) return; + var isSamePath = target.pathname === location.pathname || target.pathname === location.pathname.replace('index.html', ''); + var isSubPath = !CONFIG.root.startsWith(target.pathname) && location.pathname.startsWith(target.pathname); + element.classList.toggle('menu-item-active', target.hostname === location.hostname && (isSamePath || isSubPath)); + }); + }, + + registerLangSelect: function() { + let selects = document.querySelectorAll('.lang-select'); + selects.forEach(sel => { + sel.value = CONFIG.page.lang; + sel.addEventListener('change', () => { + let target = sel.options[sel.selectedIndex]; + document.querySelectorAll('.lang-select-label span').forEach(span => span.innerText = target.text); + let url = target.dataset.href; + window.pjax ? window.pjax.loadUrl(url) : window.location.href = url; + }); + }); + }, + + registerSidebarTOC: function() { + const navItems = document.querySelectorAll('.post-toc li'); + const sections = [...navItems].map(element => { + var link = element.querySelector('a.nav-link'); + var target = document.getElementById(decodeURI(link.getAttribute('href')).replace('#', '')); + // TOC item animation navigate. + link.addEventListener('click', event => { + event.preventDefault(); + var offset = target.getBoundingClientRect().top + window.scrollY; + window.anime({ + targets : document.scrollingElement, + duration : 500, + easing : 'linear', + scrollTop: offset + 10 + }); + }); + return target; + }); + + var tocElement = document.querySelector('.post-toc-wrap'); + function activateNavByIndex(target) { + if (target.classList.contains('active-current')) return; + + document.querySelectorAll('.post-toc .active').forEach(element => { + element.classList.remove('active', 'active-current'); + }); + target.classList.add('active', 'active-current'); + var parent = target.parentNode; + while (!parent.matches('.post-toc')) { + if (parent.matches('li')) parent.classList.add('active'); + parent = parent.parentNode; + } + // Scrolling to center active TOC element if TOC content is taller then viewport. 
+ window.anime({ + targets : tocElement, + duration : 200, + easing : 'linear', + scrollTop: tocElement.scrollTop - (tocElement.offsetHeight / 2) + target.getBoundingClientRect().top - tocElement.getBoundingClientRect().top + }); + } + + function findIndex(entries) { + let index = 0; + let entry = entries[index]; + if (entry.boundingClientRect.top > 0) { + index = sections.indexOf(entry.target); + return index === 0 ? 0 : index - 1; + } + for (; index < entries.length; index++) { + if (entries[index].boundingClientRect.top <= 0) { + entry = entries[index]; + } else { + return sections.indexOf(entry.target); + } + } + return sections.indexOf(entry.target); + } + + function createIntersectionObserver(marginTop) { + marginTop = Math.floor(marginTop + 10000); + let intersectionObserver = new IntersectionObserver((entries, observe) => { + let scrollHeight = document.documentElement.scrollHeight + 100; + if (scrollHeight > marginTop) { + observe.disconnect(); + createIntersectionObserver(scrollHeight); + return; + } + let index = findIndex(entries); + activateNavByIndex(navItems[index]); + }, { + rootMargin: marginTop + 'px 0px -100% 0px', + threshold : 0 + }); + sections.forEach(element => { + element && intersectionObserver.observe(element); + }); + } + createIntersectionObserver(document.documentElement.scrollHeight); + }, + + hasMobileUA: function() { + let ua = navigator.userAgent; + let pa = /iPad|iPhone|Android|Opera Mini|BlackBerry|webOS|UCWEB|Blazer|PSP|IEMobile|Symbian/g; + return pa.test(ua); + }, + + isTablet: function() { + return window.screen.width < 992 && window.screen.width > 767 && this.hasMobileUA(); + }, + + isMobile: function() { + return window.screen.width < 767 && this.hasMobileUA(); + }, + + isDesktop: function() { + return !this.isTablet() && !this.isMobile(); + }, + + supportsPDFs: function() { + let ua = navigator.userAgent; + let isFirefoxWithPDFJS = ua.includes('irefox') && parseInt(ua.split('rv:')[1].split('.')[0], 10) > 18; + let supportsPdfMimeType = typeof navigator.mimeTypes['application/pdf'] !== 'undefined'; + let isIOS = /iphone|ipad|ipod/i.test(ua.toLowerCase()); + return isFirefoxWithPDFJS || (supportsPdfMimeType && !isIOS); + }, + + /** + * Init Sidebar & TOC inner dimensions on all pages and for all schemes. + * Need for Sidebar/TOC inner scrolling if content taller then viewport. + */ + initSidebarDimension: function() { + var sidebarNav = document.querySelector('.sidebar-nav'); + var sidebarNavHeight = sidebarNav.style.display !== 'none' ? sidebarNav.offsetHeight : 0; + var sidebarOffset = CONFIG.sidebar.offset || 12; + var sidebarb2tHeight = CONFIG.back2top.enable && CONFIG.back2top.sidebar ? document.querySelector('.back-to-top').offsetHeight : 0; + var sidebarSchemePadding = (CONFIG.sidebar.padding * 2) + sidebarNavHeight + sidebarb2tHeight; + // Margin of sidebar b2t: -4px -10px -18px, brings a different of 22px. + if (CONFIG.scheme === 'Pisces' || CONFIG.scheme === 'Gemini') sidebarSchemePadding += (sidebarOffset * 2) - 22; + // Initialize Sidebar & TOC Height. 
+ var sidebarWrapperHeight = document.body.offsetHeight - sidebarSchemePadding + 'px'; + document.querySelector('.site-overview-wrap').style.maxHeight = sidebarWrapperHeight; + document.querySelector('.post-toc-wrap').style.maxHeight = sidebarWrapperHeight; + }, + + updateSidebarPosition: function() { + var sidebarNav = document.querySelector('.sidebar-nav'); + var hasTOC = document.querySelector('.post-toc'); + if (hasTOC) { + sidebarNav.style.display = ''; + sidebarNav.classList.add('motion-element'); + document.querySelector('.sidebar-nav-toc').click(); + } else { + sidebarNav.style.display = 'none'; + sidebarNav.classList.remove('motion-element'); + document.querySelector('.sidebar-nav-overview').click(); + } + NexT.utils.initSidebarDimension(); + if (!this.isDesktop() || CONFIG.scheme === 'Pisces' || CONFIG.scheme === 'Gemini') return; + // Expand sidebar on post detail page by default, when post has a toc. + var display = CONFIG.page.sidebar; + if (typeof display !== 'boolean') { + // There's no definition sidebar in the page front-matter. + display = CONFIG.sidebar.display === 'always' || (CONFIG.sidebar.display === 'post' && hasTOC); + } + if (display) { + window.dispatchEvent(new Event('sidebar:show')); + } + }, + + getScript: function(url, callback, condition) { + if (condition) { + callback(); + } else { + var script = document.createElement('script'); + script.onload = script.onreadystatechange = function(_, isAbort) { + if (isAbort || !script.readyState || /loaded|complete/.test(script.readyState)) { + script.onload = script.onreadystatechange = null; + script = undefined; + if (!isAbort && callback) setTimeout(callback, 0); + } + }; + script.src = url; + document.head.appendChild(script); + } + }, + + loadComments: function(element, callback) { + if (!CONFIG.comments.lazyload || !element) { + callback(); + return; + } + let intersectionObserver = new IntersectionObserver((entries, observer) => { + let entry = entries[0]; + if (entry.isIntersecting) { + callback(); + observer.disconnect(); + } + }); + intersectionObserver.observe(element); + return intersectionObserver; + } +}; diff --git a/lib/anime.min.js b/lib/anime.min.js new file mode 100644 index 0000000000..99b263aaeb --- /dev/null +++ b/lib/anime.min.js @@ -0,0 +1,8 @@ +/* + * anime.js v3.1.0 + * (c) 2019 Julian Garnier + * Released under the MIT license + * animejs.com + */ + +!function(n,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):n.anime=e()}(this,function(){"use strict";var n={update:null,begin:null,loopBegin:null,changeBegin:null,change:null,changeComplete:null,loopComplete:null,complete:null,loop:1,direction:"normal",autoplay:!0,timelineOffset:0},e={duration:1e3,delay:0,endDelay:0,easing:"easeOutElastic(1, .5)",round:0},r=["translateX","translateY","translateZ","rotate","rotateX","rotateY","rotateZ","scale","scaleX","scaleY","scaleZ","skew","skewX","skewY","perspective"],t={CSS:{},springs:{}};function a(n,e,r){return Math.min(Math.max(n,e),r)}function o(n,e){return n.indexOf(e)>-1}function u(n,e){return n.apply(null,e)}var i={arr:function(n){return Array.isArray(n)},obj:function(n){return o(Object.prototype.toString.call(n),"Object")},pth:function(n){return i.obj(n)&&n.hasOwnProperty("totalLength")},svg:function(n){return n instanceof SVGElement},inp:function(n){return n instanceof HTMLInputElement},dom:function(n){return n.nodeType||i.svg(n)},str:function(n){return"string"==typeof n},fnc:function(n){return"function"==typeof 
n},und:function(n){return void 0===n},hex:function(n){return/(^#[0-9A-F]{6}$)|(^#[0-9A-F]{3}$)/i.test(n)},rgb:function(n){return/^rgb/.test(n)},hsl:function(n){return/^hsl/.test(n)},col:function(n){return i.hex(n)||i.rgb(n)||i.hsl(n)},key:function(r){return!n.hasOwnProperty(r)&&!e.hasOwnProperty(r)&&"targets"!==r&&"keyframes"!==r}};function c(n){var e=/\(([^)]+)\)/.exec(n);return e?e[1].split(",").map(function(n){return parseFloat(n)}):[]}function s(n,e){var r=c(n),o=a(i.und(r[0])?1:r[0],.1,100),u=a(i.und(r[1])?100:r[1],.1,100),s=a(i.und(r[2])?10:r[2],.1,100),f=a(i.und(r[3])?0:r[3],.1,100),l=Math.sqrt(u/o),d=s/(2*Math.sqrt(u*o)),p=d<1?l*Math.sqrt(1-d*d):0,h=1,v=d<1?(d*l-f)/p:-f+l;function g(n){var r=e?e*n/1e3:n;return r=d<1?Math.exp(-r*d*l)*(h*Math.cos(p*r)+v*Math.sin(p*r)):(h+v*r)*Math.exp(-r*l),0===n||1===n?n:1-r}return e?g:function(){var e=t.springs[n];if(e)return e;for(var r=0,a=0;;)if(1===g(r+=1/6)){if(++a>=16)break}else a=0;var o=r*(1/6)*1e3;return t.springs[n]=o,o}}function f(n){return void 0===n&&(n=10),function(e){return Math.round(e*n)*(1/n)}}var l,d,p=function(){var n=11,e=1/(n-1);function r(n,e){return 1-3*e+3*n}function t(n,e){return 3*e-6*n}function a(n){return 3*n}function o(n,e,o){return((r(e,o)*n+t(e,o))*n+a(e))*n}function u(n,e,o){return 3*r(e,o)*n*n+2*t(e,o)*n+a(e)}return function(r,t,a,i){if(0<=r&&r<=1&&0<=a&&a<=1){var c=new Float32Array(n);if(r!==t||a!==i)for(var s=0;s=.001?function(n,e,r,t){for(var a=0;a<4;++a){var i=u(e,r,t);if(0===i)return e;e-=(o(e,r,t)-n)/i}return e}(t,l,r,a):0===d?l:function(n,e,r,t,a){for(var u,i,c=0;(u=o(i=e+(r-e)/2,t,a)-n)>0?r=i:e=i,Math.abs(u)>1e-7&&++c<10;);return i}(t,i,i+e,r,a)}}}(),h=(l={linear:function(){return function(n){return n}}},d={Sine:function(){return function(n){return 1-Math.cos(n*Math.PI/2)}},Circ:function(){return function(n){return 1-Math.sqrt(1-n*n)}},Back:function(){return function(n){return n*n*(3*n-2)}},Bounce:function(){return function(n){for(var e,r=4;n<((e=Math.pow(2,--r))-1)/11;);return 1/Math.pow(4,3-r)-7.5625*Math.pow((3*e-2)/22-n,2)}},Elastic:function(n,e){void 0===n&&(n=1),void 0===e&&(e=.5);var r=a(n,1,10),t=a(e,.1,2);return function(n){return 0===n||1===n?n:-r*Math.pow(2,10*(n-1))*Math.sin((n-1-t/(2*Math.PI)*Math.asin(1/r))*(2*Math.PI)/t)}}},["Quad","Cubic","Quart","Quint","Expo"].forEach(function(n,e){d[n]=function(){return function(n){return Math.pow(n,e+2)}}}),Object.keys(d).forEach(function(n){var e=d[n];l["easeIn"+n]=e,l["easeOut"+n]=function(n,r){return function(t){return 1-e(n,r)(1-t)}},l["easeInOut"+n]=function(n,r){return function(t){return t<.5?e(n,r)(2*t)/2:1-e(n,r)(-2*t+2)/2}}}),l);function v(n,e){if(i.fnc(n))return n;var r=n.split("(")[0],t=h[r],a=c(n);switch(r){case"spring":return s(n,e);case"cubicBezier":return u(p,a);case"steps":return u(f,a);default:return u(t,a)}}function g(n){try{return document.querySelectorAll(n)}catch(n){return}}function m(n,e){for(var r=n.length,t=arguments.length>=2?arguments[1]:void 0,a=[],o=0;o1&&(r-=1),r<1/6?n+6*(e-n)*r:r<.5?e:r<2/3?n+(e-n)*(2/3-r)*6:n}if(0==u)e=r=t=i;else{var f=i<.5?i*(1+u):i+u-i*u,l=2*i-f;e=s(l,f,o+1/3),r=s(l,f,o),t=s(l,f,o-1/3)}return"rgba("+255*e+","+255*r+","+255*t+","+c+")"}(n):void 0;var e,r,t,a}function C(n){var e=/[+-]?\d*\.?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?(%|px|pt|em|rem|in|cm|mm|ex|ch|pc|vw|vh|vmin|vmax|deg|rad|turn)?$/.exec(n);if(e)return e[1]}function B(n,e){return i.fnc(n)?n(e.target,e.id,e.total):n}function P(n,e){return n.getAttribute(e)}function I(n,e,r){if(M([r,"deg","rad","turn"],C(e)))return e;var a=t.CSS[e+r];if(!i.und(a))return 
a;var o=document.createElement(n.tagName),u=n.parentNode&&n.parentNode!==document?n.parentNode:document.body;u.appendChild(o),o.style.position="absolute",o.style.width=100+r;var c=100/o.offsetWidth;u.removeChild(o);var s=c*parseFloat(e);return t.CSS[e+r]=s,s}function T(n,e,r){if(e in n.style){var t=e.replace(/([a-z])([A-Z])/g,"$1-$2").toLowerCase(),a=n.style[e]||getComputedStyle(n).getPropertyValue(t)||"0";return r?I(n,a,r):a}}function D(n,e){return i.dom(n)&&!i.inp(n)&&(P(n,e)||i.svg(n)&&n[e])?"attribute":i.dom(n)&&M(r,e)?"transform":i.dom(n)&&"transform"!==e&&T(n,e)?"css":null!=n[e]?"object":void 0}function E(n){if(i.dom(n)){for(var e,r=n.style.transform||"",t=/(\w+)\(([^)]*)\)/g,a=new Map;e=t.exec(r);)a.set(e[1],e[2]);return a}}function F(n,e,r,t){var a,u=o(e,"scale")?1:0+(o(a=e,"translate")||"perspective"===a?"px":o(a,"rotate")||o(a,"skew")?"deg":void 0),i=E(n).get(e)||u;return r&&(r.transforms.list.set(e,i),r.transforms.last=e),t?I(n,i,t):i}function N(n,e,r,t){switch(D(n,e)){case"transform":return F(n,e,t,r);case"css":return T(n,e,r);case"attribute":return P(n,e);default:return n[e]||0}}function A(n,e){var r=/^(\*=|\+=|-=)/.exec(n);if(!r)return n;var t=C(n)||0,a=parseFloat(e),o=parseFloat(n.replace(r[0],""));switch(r[0][0]){case"+":return a+o+t;case"-":return a-o+t;case"*":return a*o+t}}function L(n,e){if(i.col(n))return O(n);if(/\s/g.test(n))return n;var r=C(n),t=r?n.substr(0,n.length-r.length):n;return e?t+e:t}function j(n,e){return Math.sqrt(Math.pow(e.x-n.x,2)+Math.pow(e.y-n.y,2))}function S(n){for(var e,r=n.points,t=0,a=0;a0&&(t+=j(e,o)),e=o}return t}function q(n){if(n.getTotalLength)return n.getTotalLength();switch(n.tagName.toLowerCase()){case"circle":return o=n,2*Math.PI*P(o,"r");case"rect":return 2*P(a=n,"width")+2*P(a,"height");case"line":return j({x:P(t=n,"x1"),y:P(t,"y1")},{x:P(t,"x2"),y:P(t,"y2")});case"polyline":return S(n);case"polygon":return r=(e=n).points,S(e)+j(r.getItem(r.numberOfItems-1),r.getItem(0))}var e,r,t,a,o}function $(n,e){var r=e||{},t=r.el||function(n){for(var e=n.parentNode;i.svg(e)&&i.svg(e.parentNode);)e=e.parentNode;return e}(n),a=t.getBoundingClientRect(),o=P(t,"viewBox"),u=a.width,c=a.height,s=r.viewBox||(o?o.split(" "):[0,0,u,c]);return{el:t,viewBox:s,x:s[0]/1,y:s[1]/1,w:u/s[2],h:c/s[3]}}function X(n,e){function r(r){void 0===r&&(r=0);var t=e+r>=1?e+r:0;return n.el.getPointAtLength(t)}var t=$(n.el,n.svg),a=r(),o=r(-1),u=r(1);switch(n.property){case"x":return(a.x-t.x)*t.w;case"y":return(a.y-t.y)*t.h;case"angle":return 180*Math.atan2(u.y-o.y,u.x-o.x)/Math.PI}}function Y(n,e){var r=/[+-]?\d*\.?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?/g,t=L(i.pth(n)?n.totalLength:n,e)+"";return{original:t,numbers:t.match(r)?t.match(r).map(Number):[0],strings:i.str(n)||e?t.split(r):[]}}function Z(n){return m(n?y(i.arr(n)?n.map(b):b(n)):[],function(n,e,r){return r.indexOf(n)===e})}function Q(n){var e=Z(n);return e.map(function(n,r){return{target:n,id:r,total:e.length,transforms:{list:E(n)}}})}function V(n,e){var r=x(e);if(/^spring/.test(r.easing)&&(r.duration=s(r.easing)),i.arr(n)){var t=n.length;2===t&&!i.obj(n[0])?n={value:n}:i.fnc(e.duration)||(r.duration=e.duration/t)}var a=i.arr(n)?n:[n];return a.map(function(n,r){var t=i.obj(n)&&!i.pth(n)?n:{value:n};return i.und(t.delay)&&(t.delay=r?0:e.delay),i.und(t.endDelay)&&(t.endDelay=r===a.length-1?e.endDelay:0),t}).map(function(n){return k(n,r)})}function z(n,e){var r=[],t=e.keyframes;for(var a in t&&(e=k(function(n){for(var e=m(y(n.map(function(n){return Object.keys(n)})),function(n){return i.key(n)}).reduce(function(n,e){return 
n.indexOf(e)<0&&n.push(e),n},[]),r={},t=function(t){var a=e[t];r[a]=n.map(function(n){var e={};for(var r in n)i.key(r)?r==a&&(e.value=n[r]):e[r]=n[r];return e})},a=0;a-1&&(_.splice(o,1),r=_.length)}else a.tick(e);t++}n()}else U=cancelAnimationFrame(U)}return n}();function rn(r){void 0===r&&(r={});var t,o=0,u=0,i=0,c=0,s=null;function f(n){var e=window.Promise&&new Promise(function(n){return s=n});return n.finished=e,e}var l,d,p,h,v,g,y,b,M=(d=w(n,l=r),p=w(e,l),h=z(p,l),v=Q(l.targets),g=W(v,h),y=J(g,p),b=K,K++,k(d,{id:b,children:[],animatables:v,animations:g,duration:y.duration,delay:y.delay,endDelay:y.endDelay}));f(M);function x(){var n=M.direction;"alternate"!==n&&(M.direction="normal"!==n?"normal":"reverse"),M.reversed=!M.reversed,t.forEach(function(n){return n.reversed=M.reversed})}function O(n){return M.reversed?M.duration-n:n}function C(){o=0,u=O(M.currentTime)*(1/rn.speed)}function B(n,e){e&&e.seek(n-e.timelineOffset)}function P(n){for(var e=0,r=M.animations,t=r.length;e2||(b=Math.round(b*p)/p)),h.push(b)}var k=d.length;if(k){g=d[0];for(var O=0;O0&&(M.began=!0,I("begin")),!M.loopBegan&&M.currentTime>0&&(M.loopBegan=!0,I("loopBegin")),d<=r&&0!==M.currentTime&&P(0),(d>=l&&M.currentTime!==e||!e)&&P(e),d>r&&d=e&&(u=0,M.remaining&&!0!==M.remaining&&M.remaining--,M.remaining?(o=i,I("loopComplete"),M.loopBegan=!1,"alternate"===M.direction&&x()):(M.paused=!0,M.completed||(M.completed=!0,I("loopComplete"),I("complete"),!M.passThrough&&"Promise"in window&&(s(),f(M)))))}return M.reset=function(){var n=M.direction;M.passThrough=!1,M.currentTime=0,M.progress=0,M.paused=!0,M.began=!1,M.loopBegan=!1,M.changeBegan=!1,M.completed=!1,M.changeCompleted=!1,M.reversePlayback=!1,M.reversed="reverse"===n,M.remaining=M.loop,t=M.children;for(var e=c=t.length;e--;)M.children[e].reset();(M.reversed&&!0!==M.loop||"alternate"===n&&1===M.loop)&&M.remaining++,P(M.reversed?M.duration:0)},M.set=function(n,e){return R(n,e),M},M.tick=function(n){i=n,o||(o=i),T((i+(u-o))*rn.speed)},M.seek=function(n){T(O(n))},M.pause=function(){M.paused=!0,C()},M.play=function(){M.paused&&(M.completed&&M.reset(),M.paused=!1,_.push(M),C(),U||en())},M.reverse=function(){x(),C()},M.restart=function(){M.reset(),M.play()},M.reset(),M.autoplay&&M.play(),M}function tn(n,e){for(var r=e.length;r--;)M(n,e[r].animatable.target)&&e.splice(r,1)}return"undefined"!=typeof document&&document.addEventListener("visibilitychange",function(){document.hidden?(_.forEach(function(n){return n.pause()}),nn=_.slice(0),rn.running=_=[]):nn.forEach(function(n){return n.play()})}),rn.version="3.1.0",rn.speed=1,rn.running=_,rn.remove=function(n){for(var e=Z(n),r=_.length;r--;){var t=_[r],a=t.animations,o=t.children;tn(e,a);for(var u=o.length;u--;){var i=o[u],c=i.animations;tn(e,c),c.length||i.children.length||o.splice(u,1)}a.length||o.length||t.pause()}},rn.get=N,rn.set=R,rn.convertPx=I,rn.path=function(n,e){var r=i.str(n)?g(n)[0]:n,t=e||100;return function(n){return{property:n,el:r,svg:$(r),totalLength:q(r)*(t/100)}}},rn.setDashoffset=function(n){var e=q(n);return n.setAttribute("stroke-dasharray",e),e},rn.stagger=function(n,e){void 0===e&&(e={});var r=e.direction||"normal",t=e.easing?v(e.easing):null,a=e.grid,o=e.axis,u=e.from||0,c="first"===u,s="center"===u,f="last"===u,l=i.arr(n),d=l?parseFloat(n[0]):parseFloat(n),p=l?parseFloat(n[1]):0,h=C(l?n[1]:n)||0,g=e.start||0+(l?d:0),m=[],y=0;return function(n,e,i){if(c&&(u=0),s&&(u=(i-1)/2),f&&(u=i-1),!m.length){for(var v=0;v-1&&_.splice(o,1);for(var 
s=0;sli{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}:root .fa-flip-both,:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adobe:before{content:"\f778"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-bacon:before{content:"\f7e5"}
.fa-bahai:before{content:"\f666"}.fa-balance-scale:before{content:"\f24e"}.fa-balance-scale-left:before{content:"\f515"}.fa-balance-scale-right:before{content:"\f516"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-battle-net:before{content:"\f835"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-biking:before{content:"\f84a"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bootstrap:before{content:"\f836"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before{content:"\f853"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-box-tissue:before{content:"\f95b"}.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-bread-slice:before{content:"\f7ec"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}.fa-camera:before{content:"\f030"}.fa-ca
mera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clinic-medical:before{content:"\f7f2"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{content:"\f382"}.fa-cloudscale:before{cont
ent:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-alt:before{content:"\f422"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-cotton-bureau:before{content:"\f89e"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:before{content:"\f7f7"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\f952"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:bef
ore{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-disease:before{content:"\f7fa"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edit:before{content:"\f044"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-evernote:before{content:"\f839"}.fa-exchange-alt:before{content:"\f362"}.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-alt:before{content:"\f424"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fan:before{content:"\f863"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-faucet:before{content:"\f905"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-file:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-c
ode:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\f907"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:bef
ore{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hamburger:before{content:"\f805"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-medical:before{content:"\f95c"}.fa-hand-holding-usd:before{content:"\f4c0"}.fa-hand-holding-water:before{content:"\f4c1"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-sparkles:before{content:"\f95d"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-hands-wash:before{content:"\f95e"}.fa-handshake:before{content:"\f2b5"}.fa-handshake-alt-slash:before{content:"\f95f"}.fa-handshake-slash:before{content:"\f960"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-hat:before{content:"\f807"}.fa-hashtag:before{content:"\f292"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-hdd:before{content:"\f0a0"}.fa-head-side-cough:before{content:"\f961"}.fa-head-side-cough-slash:before{content:"\f962"}.fa-head-side-mask:before{content:"\f963"}.fa-head-side-virus:before{content:"\f964"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.f
a-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hospital-user:before{content:"\f80d"}.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-house-user:before{content:"\f965"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-ideal:before{content:"\f913"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-instagram:before{content:"\f16d"}.fa-instagram-square:before{content:"\f955"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{content:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-house:before{content:"\f966"}.fa-laptop-medical:before{content:"\f812"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{
content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lungs:before{content:"\f604"}.fa-lungs-virus:before{content:"\f967"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microblog:before{content:"\f91a"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\f956"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:be
fore{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse:before{content:"\f8cc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-pager:before{content:"\f815"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-arrows:before{content:"\f968"}.fa-people-carry:before{content:"\f4ce"}.fa-pepper-hot:before{content:"\f816"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-square-alt:before{content:"\f87b"}.fa-phone-volume:before{content:"\f2a0"}.fa-photo-video:before{content:"\f87c"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{c
ontent:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-square:before{content:"\f91e"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-plane-slash:before{content:"\f969"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pump-medical:before{content:"\f96a"}.fa-pump-soap:before{content:"\f96b"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-remove-format:before{content:"\f87d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"
}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-shield-virus:before{content:"\f96c"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopify:before{content:"\f957"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-soap:before{content:"\f96e"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:be
fore{content:"\f15d"}.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-down-alt:before{content:"\f884"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-amount-up-alt:before{content:"\f885"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-stopwatch-20:before{content:"\f96f"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-store-alt-slash:before{content:"\f970"}.fa-store-slash:before{content:"\f971"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swift:before{content:"\f8e1"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-symfony:before{content:"\f83d"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:befo
re{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toilet-paper-slash:before{content:"\f972"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\f941"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-trash-restore:before{content:"\f829"}.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\f949"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"
}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-virus:before{content:"\f974"}.fa-virus-slash:before{content:"\f975"}.fa-viruses:before{content:"\f976"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-voicemail:before{content:"\f897"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-wave-square:before{content:"\f83e"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{con
tent:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{content:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.fab,.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/lib/font-awesome/webfonts/fa-brands-400.woff2 b/lib/font-awesome/webfonts/fa-brands-400.woff2 new file mode 100644 index 0000000000000000000000000000000000000000..141a90a9e0a4b5a794557efa098a5c52320fb226 GIT binary patch literal 76612 zcmV(_K-9l?Pew8T0RR910V_lR4FCWD0tR>h0V?SMO9Bi400000000000000000000 z0000#Mn+Uk92y=5U;vA15eN#0vs{Jdd;vBBBm<3f3x<3E1Rw>A1qZ1-Teh2VMdmgQ zUYvB30mQLfUUz2%GaC`R9ppkUVokv~W(8~lL&;I}ae@B8Zp0PEv=-d6s7a_@v ztm?Y%B2pP4tcak5AOvsYsTxc{_p{C2F4o)Sa#l^s1zXEXrKhqgPs?kmY+K4JSbZ7t zyp4K3qZXtQ+>8Jtz-W4*4X7b`!)Ci5UEac31QT9h2!s#-D=9#CEXN$y!u9wGHts?# zQ?J6xc4f9P=w9C9)(GBa!|}WCiEf@s($gQ<+u`Ob(GmjzfWZJ5kOVVIihUj5g*82< z)P#L}hF{&;cYWJq*eRnw;y^i|;-9~t(dGK{yZ`&OEuN2Oo}%p8 zyDg_NCgyM);Qz}OY#G2 znB5RM1Up8J`oawfHA15Ma@4FpxXq9=Lqvz_5dUA|1K_Zq+RxY@XOf#CP=Y{#F_kn< ze2L@T^|6Pmbd|1pPj=jLUN(yI-(?h5}sLT^@mF zUkB)Kvvg*E(-r}e0)(e|w+stkboh2_l{8`jD~Te0*iy!iDPs3^n=}dXaE1KX}xrnuI+9I(11w>j3IJ7fRdgwUdr@rc?k}e z_SL<|K?K1P{@?QW1KR!nU#qN_aqmw@byqEeQ1M6&3u^iJ2vgVIYd|Nm>LeO0`xisw^-2_NGijC 
zpujNTw##C?QA|08$nJ zN+XbxM*t{W0Aw$vNwmGen`g2w`*Icl$redzMkJkRPgsXdxw;Um5OUXD(bh+ho?0p4 zfk>r!fu_KfCIuQTK9nWP1cx>r2n_Pwi@t;HZ&Pgpq-oMheY5W~%Pgn;@yRR=k4&(s zSZhVZhJU$q2HP}(&X3g)U(sNJ60ynSbAxQ_-vRaoFiOe6CX6yb8zhap_kJwlW-q{t zF=d#PwZJy^YyBj-OHN=Brvm+)e$P6f0)|Tv6j^o}0ssU6c(Y9QCjju)id|JLK4hpR z3IK5cAV6Te2MDBV1VHd>JQWlGj>km+&{p2=zrqSz01zmK=RL|VYFPl?AKagU5B#6q z@fk347>C30004xwkB{{|$9ee#;P1ZL6GWZ>fP)dhgczUy(AN6eU|ZU5w#TB&SHVhC ziKeMqb(*bKb!eW3wM0v`LYuT-M>VC}`b+QhH5TUZngS}pZf@BRMZ zkxIy*o@T6c(!&B4Gs+li*u@bhImKC~xWYXi@{BjUN56?Cn>@^zU5ZkYUKy9hOwQE2 zna}g@x~joiQLAf1?W(bg$=m zz$4!2-9GFyzV2Io?587SsK<)2Zk!+2#^dpFqGvUE({@Hp<4oStpw-1j+OmVbl%#Vifm=TLCG-Ob(PBSzr zR;WM7g*n>^?xn8S@I?F1`qReHu z+D-R|!G|+*rt~>XIb~3w=c@G?^^m$rU8mlqwo)6YwbWFqma2~EFU{qsE}{W{u}w&@ zRqyo6Uw+Ca{6C_13-1L0;Pn93Sbzhk0Ni~g9ma%|uP9&x_QMT>%Xl?bG#Y26PWMY0 z-89f$(e!qvg1w@A(irB{BfY@*;ah{qWay9`4=6a`2}-uh_q zV!6f>g@R_$$_vq5vLJOPc7`PBk`c_Plboy{$uW4(INYNvR)9seVJJ$zjuCL8SDSR5 zDwD;ir3Ss!xIGy%z}g&#;#5R=Hn&ZqZjLQ+ifS0TFn-vY3K=MN7Hk3foMU0(7i$<+ zp}IJEL+ND~9H2yDo~O6FdymLJ;MUt>c&UbE3; z{$xagVYJ86Al=jZF}1gZXaX%eR3^FCSZ4B4&I$<4YHCUI7+)}EW!p|Sh@&>TPtNW| zqn9T%jI{=rmayg}7=nuIGp`7rt(LeCCp~JNUiOY%GBa#W0Rp9)&mGPrWEYvgJmwBsA%7kl!YxH!^OKs ziG4=tXq8r^9a*E5wlGTjB;LREi>{fAWFg*Qb$t-6^CUC_-hN1^$DN`*@gIVB-A&{W zqTlCSR%~Np4!|0NumBD+s~Q+DMbi}AA-=+d1xg?WoMiEWeKCJdp)4VMwK}0>Dv^Ts zf0|Sq2j487X=uZFH);J5SvskTGewHS(cn@?7TMd(Zv(#G6~(Yrs$o{jE>UV01d_Zv z8Aa0xYMy?oN1bitQiG4aD%nT4(G#X?^Ytu*=R;W}=4F8e%lSvhFwYTaHtombJ3U$u z0~PR{w*;b;CHZsO#glxruOiN-K%@cZU9@iXxIZs)8uF~$$mMRwEi!yAGp@EJT`nA{ z2VJA4t^tdiHk;6J9S7fCgh?)qWPLdv-)ltbjGI-HxFS!T!$^l*EH_8%xpQD{AUqHd z+S3aXxtO=>(C>6TyYS5A+}}i*OBU(%sV17~1i2ZaD#?gVFGLp=3Ts_75Oj$Hex3Uq z;YcROGxi3jRl)q_Yx1W5YM6)0EN3qjM5L@$XO52S#-=}%rMteu#*XMy`m;33Nbs_dZUKdS^3t!<^zlVp-p|6OJR zCW0}P622gf0i7TA{yPB6fp6#F_Uchc@tkbWot);&Qd|sA!r+XrC#^&{95eb*8ZVgh zw9K9LclUuxv+K)4S&!BElY0s10F#OWl9R*|(dp6U<64jY$yOfLC}XWMgr@$k`6s=W zFHDE>v$eykfH9?~Wo?T<+csS|*FG|p->O=<{m@016C$ye8)#LsBJl2P#XxTJQBy9) z*sx`C)wE@wSe`5lM_%;Bc0A>l8jeI5YL3RUW7%05b<+mJQKFyM-_$>aotyKjH_6~8HeWR85~OZRf~!CPf!1EV`7 zPg8{2|4|#q&Bf5}bhn8)!R#;l_U$DKQ-mUf#kOxt5DUlM!{)-iI-U(^%R38Uo1C#^ z?d`}{uDj1J;D7$iOjWuv`CYsI=EU}AF>b1-9$ZP*pA##~wAEKT*4>lT&ZJ4n#zCsR z7hgJB9Efz-_LXhjWp7U=7f+6>Nm$NidGmE`x&Y0V@qRgUXjDCCbja|J&FG(WIKTBu zY3o$19wY)9KoFy(!fs2%L_+$x$X>WcmQ5#UG~Cxh=z|;JM z^^C~uhbd32sEO+E70v{t(waqIhd3Fg)yuf!kEahl2=@oLQH;{JZK_Pb?0tW_yp)Lj z+3lJaC+B%OENMN(50C7OsR}6%XDuBu(Tx}oqC@S)ax6mYeE`8fewp~p=;t8YG1$?H zQ6-qvq)5{+0ZU(17#yE5`SmjZTA?eF?Z{2vgALbLm*%sDan{vA-22dE6#%L^5i{&+ zA6UArenMnxs}h6JtwBc?LWcv+AwJm^%D_Oq-Pztr&NC7$`~7&4%%X=)w^SUp=tc|Q z#^~xwGPgnw{AH}t0bzuQ%Lu9lQ6$cB#-vMdqHT+LC7lHzn`OC4ZaeI=ZYWK)0hO}s zA;6EZ!Fr_%k%#~+WtcR1S|Lzm%mPk28;W9*DF@DXU?X7&UBvoay==af^J-@wY}QpE7b3xs~yAfjB| zK3?vuZeBPap>=dxw(cz{^2 z^1|{Aum0a#%pFWFXOGLK(uNj2>?7xTIM(SbqI!dP@Q`cuy1YCEC?~lhkp3t!9R-u& z#l+_ye59XyedVl_H}+jVSET@ykz2W)WhR?`6i=wmoqxuJcNok9j-3XeH|nqgJ=;Tk z?Y(hLi~%0-xMpYjQiOwvb6>u95C7X~Kuq zk>)jLuB<{@vk0wgl^}#yXis{icRkgbi|l&D3__1CWJd#DXs^w@)lDpuZS{7LSZnH4Vq0in)iowl3&&| z%I;spMw|`Ez0$FpYZU^$X6wux!6Nd78Ax?V}&v+<(i=GQ}dF_G$te=Aaanorhy1EviDFpi>?I>ny^5c=Vm zbR+&o`0V5o7$c!MK1@2cD+zS&0{M%-{S~H2pV>f}Yl$6Fj7ZS}Q2gwJMG?MPE|&6f}7)FubI=o`NhiO2nd) z3^CY5C4`XCD2mjonVicqqs9!NV=T)yKk1IjTCp=dR_f+kn*>!$o5PDkNI^D@-$G!B zv`SS}1Zke3@)y+tallm9T3Rtgvi8{;EQ(WU#~>dZKq*Hg>Kp#azL4Yz2>cXiN)o!i zI4Ymwt*5CR&Nv{OD2d&j%ro;vMe$*a(DcO8`weNOu*eO=GpGOzCaKQpoGhdg9I0BS zkpN0_l2L7NQG=_Df*wo}QC&2(>Js;x8^pDQL=nqcEKiyYcPt{KBRpSo(0L=1rl))& z?hYou;=@@Lc=|Tv}^XJrQtB9fIn9AK+?GL`llnK_2jlxs&v14 zf8BU%ve~^on1tH&p(Kh{kGlCEk@De~8VJ>@nHFmrgOBr!h?pGg#gsnwH`bGmxw!ZM 
z25;|bY$AQ|xyJMR7n%1UA`Fz0g~j^Oy${z5{t)v)bQ& ze67E~wTX`J3)55IG-B!*gfeA$sa)Jy)bq5nXoT3iT3rCxjj6- zGGV;Yg8V{nob`H2%-j#|ZDYx;{6i^S!SbdcI`Ys8TV1~^Ra$|IdGu1)1$rCu1E@FX z9c9_J2BL{kMT{+EiH8Tl_pkio_|F<+RvZc3&ANs?^m;C=N_b!zMBfPOM1|N99B2PENXN!yJYi#Hm0=WOMPatM?U%YFpbcIgU{IJkq zoY_FWXc(cl8}q&S$nGz>{FLG7cB2w(g}+SR$hvng9z#1MxB7aW*Q1LRx39Avpdc## zenIU-K!X8Myx<-*E2eT#jy5xd_?&P@6OcEzq@C~DF6keuYV&n$x9<;KYL#aZh3DVi zWSzU+>V7bFx=$lIZ`!_KrFY}}6&)<3fDz8FvY7$cT5$2Vztr zWfmz1fVJVxiO0Rjy!DGp6S)|Ob^N^Qaj+DOVx*1b6#cEjV|it4;l8nt3W5(Y=+$mvytoo%JVPQ39B<89+L zQh&~f5@_;A$h~!*Y0L55(v{Vq%1j#v@%{8}bpDf6y823A+&r|Od{WLZRKs)zYu#QK zq3f&v`ndb2FsZRh#Kp7*un7aITHsTTUZG4l?mg+gDmtHWsd%3~R%M(I${47m0%D<5 z5OP&G7OgBI9E!sl(;(Aaa$80|Jvb!FaA0*Qq(_g4igS*v588M67}>*;(KRL2Jp&$e z$BE`YLckJ0WOSEQD@Zt)8QQTMZ5$PMPkIiUBJJbJUHtvHpnIBZ^gymzwR=sNLElD~ zbJTlIYhf>{v8~f(MMV!jyQ)91=Zpj0zm@7qT#i~njQsOjMAdux<;cU1%fPI zX?`TGbDi0t6diJdm+kXOirLL`>(i1huR@~Uw4vw5SswHgoIbjD`qKUI>M;;W;&|=M z>M)X4)U<*hVc{J&c_LY)f!)c&LOws%RI@BG!4Z@1!<}>Mg?1b$x4QY#;^-O(1f{pG zm#cvqz^!DthOzU+BNqv5R$hwQ*Y#;pIQCSJy^=JlldEg&eezMPpplGP+AoL>I%46` zHa2aO`<|?y?0v+ZhFjfdRohtB2-$b4$5HIW!IhvxoktL-T|rJ(0O>H*#MSq@*Ozp5 z|63s5T2eAR+55}jzi95RM3ZK)_B|b$%FypAXIA{rdOAB(hsew-7C5oYaVzsyFY~4f z^c(fk+jT3_STty+-|eRVD1H1>CI3#vLt09~`Vqj>)l9NBoe-$sBoibbrfub$Gl6*? zrT{Mhsh8H52{qZq62N91R)$7>47NKV1R5*@LO2J2#z;O`>Fr9-2v|U^=qk+^d#Zp= zd02BBw`|_FRhB-l*P>KG)YsM^){c)U9HU325(dDpkAz;^mt5Hk%)r?XfH^fdEZ^+o zU_3M#t4|uRa|(C1M};i4{j_U8!~qCg6{K#s8Sd&W^oSOEb0tHn75ksIG_cMZJTa@Y z$IP?(TXIy@3~)hEop$~X+2HwY*@GhCrKBJDXT7>q_WTFeP*xV9{N-MmsXy#If@s88 z)pokMH(`srj+@fO4HY5MfOsX?%`Sso-Z90QKY%4H?C^N?=pj-=`AQyNW}*B6c~g>XB7+tqT-i8mbH>2IbGcp zeOnbo4?OBqD34WG;XEWxkc`#?(QDEci{k37$0dy=g;?hdHat=Bp>EZ^(x3JzCq8Dh zraN(Z#8k_aCcI;h2(5i)LfkjopL-k!8sA*!blJ^0&y4x?lamEpFXHcNI_B1rAgx9m z1*>C(JrRXJ&JgD2sI}T@uVqw=zl=!@xQbb&Su@0`G;wDuSa6~4=sw-XnP{g=;D|YI zsA=lmARDJ4$7`&|nVlCK(q zrl8D@Y^yp}Q~`yn+2)$AUDOrI3FXIF|lm!32VD*i0`XaL*9n00#i= zV1_x63}l26wgA}T6e>;9V-N7GQUw8)nH(Kv_L=Y0lB#L(YZ2nKo+gB7I$L?$e5ue~ zlCWhYyrYy&nLB=KbfS5>jK|byzY{8L#!Wz*g48EZOk*Y(!rY06TjSLNI!=qo6iURQhXdF7fi)a=A zR*)MiErQ&WrT6t(8Nwa33#dXR64t3)x*aPXXoQ;cUK_GYh!eLnY9pixRb>pgH8cE2 z1=SBEIRrVo(>yFDVt&S8>~o4Ks(HY$5*N_SXNFKo#8)sO@xBBWFh>qB0mcSSoPNM* z5En%b#bF)*D(>EYQwGS2jEKC-0YW7XfCDJ~ZbQlY3%wO+xaz&xxA(JfWz($GPe^M-l234;q1YNfy4@vPb;}vW|=bR?rfNNT3-#p@Y@D~z#;JcflgIX z>WAftby_LLj}-{~c;LjP()Hxrh*Df&)ttO@SoAWu%ipKJQ@B^qixUm>XUkCtUg%B} zEk$Ociq5oNes48!y&8aJnmJ-43p7#*5Oc7W3WqByf%Ntc)o|Ctc!jia>6>1Z9$-UB@ z-`-nw!3bm@j4D_aa=@S4PD`#3q#8K?-qSPfW1)On_^VpOTawc+1$N#O!Gu=R;De2P zrv(Y06~x1CYd_P3fbj~1RmfeSz~R08@3oAKbOXX_VR;pM6{cI-8V_9N$tA7xwsk{I z?@p?0zEu*}*4ld_&*@2jRcE7D+YNLjx=gJ5M#(|Sk9Q6t4R!@~us4y9HWLn;(ZB** z3B5k2a~KWCvh3;g>lTcH6f1&7VFDh!uTbhp4pOnlZMq-u6XC*#;_@qXQ_8%W z2}vZo@I#+ajYZ$i03^8g3nR&_2zd>5Ec&9xL7#`yY*9NwhI5%g!jNtEXGr_;NCx(C zETXD4x*IE7(MI`Olgpv{h-NqIri+StRIf94hJbK?FbZegmN2Oet->wQOv zZuqgym?Pi85Io_&9n8#J8{>HESOqW$S7SQoZ#smVu#=kFq3 z>mjf2t7IBgx%tt=$Ag74AGNn$?wP-S-Pd#J=8o2Xj?oYhO5xVt@?^w!=;xZ=n)%1D z(eh0~q3c=HuvOv$;^<@?AiMOfbcL92&-wYGmxko7J(-J*xAk}a8_yNZrtI%$dunY! 
zt1LmSsa>5zY?Nbzuz?IyDw!y@?Pi+Ao`SAp4`?7?RMJ{`lv+@4QL%xd<|0v~U6#D!@GpRD>BqkKnTAHV z*4<}+*HhF&Dw2IJ(!jSfl0I*1X*Ci_u6l_JCr!*VNTjAvv)0C#pW?9hrm5B{Gs;Q$ zAd@9AmL#O87l3pp8t+=`Q#3i{?O_a46{+WS8os1>zq1&MWM;C=K0cJ4I8B9~;&P9`I3igwZE+tCiFFKV@b9o1b6;8FMCM z1|TXexW07@QqL00R-?=a-8|fZtZ7r#3+=}HX}ld)f4hGju>aztm^t&y)!^*KNZ3s; zWv&BM&BFv7`Y%6X&ivs0$m^dQ-@o@o&2c-A6=%&oP~UNMpoZuxWlydkw(4ybz!{wt z%q{Ol*<3Wu5*(BmTAO`G^F`aaH5z%WRPfvzdi3RF#AfKryz7-qAAm}6J^EG2u`7A6 zmBiLjHtGDRF!T~^42I90*o%hm4o0$~{YgS4oDdi7S5uqO-zZDOyOorx0U=PL@M$Uj zpIS?)l=)lhUUokXOxF|3g6VQ1R%UXQdiMz&@71ftS?&BA=eFu6T`#`b%6Ez#iFL#3x9$`${CnfcF^Dvi zwoIIZ2?gH|7GwuK89;!U4q@EHkDqsFU}wnog|R{D2Disp)e-@yg;n(=AILcHDL86P zB})iuCIBd6;U+;?d9}#@avzUAY5);}DKa!PAmB)OzTT*ZK3>BUBM*lVNY{Hzpl+(4 z#$(*a`{u%&L-2*n;AMu;WxX~D1*@@FnX>W`YHqAlb8E4j2>|Ma*N6Aj$b6Ad-TaP6 zbL*?!&&dq^yzyQ)-nwJSz7_F1gWg~@&Re-=Ke=WBmOYSE@OWXqrWb~8UtCWTy{Icq zXZN~!n^c8uUgBRk!=c1j(|0J~)eETlqc_3C~R9gmp>A+tk>@3f7soz%R zM817{|GoxcBzvc)=NYJ7=Pv#zweBWu#Z9c1M#$0`rM*(&td@9+up(Im7+sr}AOM>B zSvDra=#ZV+p67zJD!E@Au;j@L7w;D(+q`_qF&7YKSz-sC>;YZ8Y$lyj*?jfFuaC?C z%jiqlPSj=4!!!=c^FckNSeaN%CQqY4tp(=i`rT|~erdmu%%9PR%VneQ^Kk?}@z=6> zuf!=I7eD+>BczpH`s+bjaqJ3LY(`%jGyp+P0LFbem(X}MIc_@b0~;$o0INfN8|4m{ z=9L&4pgt-M57APA&xNax^sv#$X@gfwcSQ9+K%8h;HxXZeJs7IRrTm?sE8wQk37C=j zG(dp1!w@?bdwOwbV-fH}xIiut9H0Upd`%doEjwmK%ArCK*@)gRjyBJ2^*DX!I8st| zUG|E-)`+*~5I`E`>(8zgCy|3 z`wB31Wwtx^Avi9IngN>JL*79W1#JN^+oKG!iVBy|twQ zMZ~p`Yi%TrPX_(QXR**v+tRj^cRA#0?1mu^lC{yl-au z24s9a%0Tte>7a9Pxp%-TL2h$D|J0cGVPVA9+(rlg;&fe*#?V$njjwN=v6NPKUcb+T zfNsA*Pu~L+48=`DX$BT>c-H{tA-x8mg(u)S#=8#Y0lBMQ$1<(AZf=UuNh^vZH^*8=Px{upu_W3tudOte z@gEys-5vO?m`SSQlFq!D9VUukuoZyqG^n(jS&SuNaoWec5oNM6*IVB@qaP-{ku(EA z!0Vh%-g(5FUfSoCVF6fpWVcrxFjG-YNk_!zr{lz880xjhI!UAKtbd7mw<}0;21!bT zGitfQv6XYS7_T505g>})%4EG-q6|9?E=KIVN>~#{xLF7JkbuZcfLcr};F9i|ZL2J) zU}C(^uFWNvglHj9Io#J-{6M&VuvSPWVk1XishsTXV zsI+~zo%*e8DJ?(CKte|sa7)hTCrNrFlA%E@$Da}2+Ua+C-v@#z2al?^!#qnIvFi_3 zt2g51ubkuhB=hv)qoP*Xf@7@VYWdIUgZ>Qe4 z**1QCs$oRt$`8fKR_?hETbXySO=sS;a|j;Z=w|=)S=V~~&u!}&S36HAQxt6!XqnKU z48F)3MefYleZ-dW@2iUZTYR;h3GH_yzelz&WSKG88aJP>LE~!U$pTwTffBci?#BM# zf2r#fIa@U=qIfF`TXPzN+Yq91utr?J%&OQg{oGCv_ny9=*SxItGW;UZ{aq{*eOxPj z(_xF!{?FTgo@~8|BS%}&nSG@a8?(Iql<{)}ypO%J3tr&AKk}e5YVW;56hxh&m0}(~ ze~Uhp1WJq-IX|LmH&x!+Qi4kQk8_-Bbj2`Ri{h73xSBENc(H5$T0a+|n5L~uvH{{a z5c5KgF4rREGJx5z6rNLh+*Y!94sC(njcjJfNBzjo3@h^}!(ncl&-S3ae}@eehj@^I zfh8|bhhX`frYExje^=8`+CioQ9AUIE<2Q4fjS&J8hT(`MCkoPwJzB}NzG+Dgct=0A zn^|QuMdgpTwIQ{Fuoq>H!^3yfM4O7!A#IWD>&LWieS{u3QX#Ap6CsP(3f57z$6GE& z$@EmW?`-ZGsV6y%L60F5y%w6u8Tb#9etW|J~b?l&?&*t|BbPlAIB=_0ltVB!^)~a!g$|zyH z)27IV3_6Y1O@bPFsp>i_m6JO-)xjKV4+$?~Nrg;TloPQpBW2RDKDIc0XYRT^VP>cB zkcTG%(O>S^{2v5_+voAo@+j!x@~O5LJjdFiafN~)Do;Q4JGKI~ukDI9t+|&`ss4%t zEd5~#akAqmA=gz6TOh})8FAOJ0N+T{@(T_aQcS~|f2cYesn4UCj=|^>_u&(&EXQ}+ zu1r^h=6wVE&V;`_cI#rEWj9U6Z7D^qJB9qyXi0zQ&b4(sFV4a6l@F zW5?9MJr!D(nBSS<8cEJhBvlcmY6C2NZ7@^hjvV(L?d)mh#d(I#g}l$`RTpe&25kizL&5$!WT3MYGin25*%*=Ro4(6NN)m8yMI zM)5&xx~rJlO*gbe!;%7VQ}PTTof%dTVk6Y?70gQD&v$(9h_wYzag!(m5e}(Rb%LG* z?Z%=T_fL7%=dbkdJ@po0{>tuDeD+h(Ds-&PF-}zsywtsPKj~NSjV@j8MEa%1xOlm8 zd+%f-_wFDU2dmFnfi6p5#`uU2))@XXBmF}-rVZRaTfF+63Rvie%xL^>U?oJf7+DBA4!$C z;r1}K*kdUG-QGV-c{8c>kscY>eaq zBT#W;0wLAQ(d#+^PoN#6I4GtJ z@#(BN6NTYRF3J>0TPgj+p7GSwU3Zr}wZXL{J5#kXd{)Z7`^*|JW-xQ4d!^oi&l+g>yF2x+~) zQ%I6>1|23>44tOtj>a_abQFqWYyS5_x>C|uj z0Vb!gAxoM8p})6{UnKtdXR420K0tIK|BbcH*dHqcE$2Q9`>l^E&vX0g?x*!v^nBH2 zwwZ2fu13gvF3CHE4tayz{AhE5*9<*tphfWymmNc+l!W_Zui0T=n`XDyQwE?MXx>u7 zDxBXs1Yut@T%z1)LpLB>UWgDlBpP~&K88bRh1-mU9Hn(USHpu5;VX(P078 zL#1X#2m~^|WAX)R>_>cr+NlNmTS=~;(N(1BJ4HjN$kLm|h=WHX6p9DmnY3KFa3I|e 
zIf}}BcNOz+Y}nmtD;}h68I!G6uzBNfZb-5F`4>|`Xw@-=M2<*|G6#ZHaF;ITQTsNI zO3H*bp0+g4)?WaFo2QaI8n_8c2n?>ZCi%D^Mb?AAQ&=(lbK?tWx$6@R+_pGU9IGY- zHPuriNLMtLf_abig>yzuTL>NW9OD=$F)iMKGE*(k-K*k^8|pOALS>el`I|%tp7)3R|EQnFIdQGDHO(w`ax|%o-VhuS zR?GIWQMiwGpkLU0Pf*-p=J!xlLIp%!X#{zK zv^0h;S~V{&GVuRTC!;sjYpaB{XY$qEDk65#KR2 zKB29W1j>t8v#+SyMw0%~3!`JbS>2Uy6&B{i%1WBYV?yl-_L8@N0e1K>`=j>*I6SE; zh?+1@H-}bM^XGj1>w_EVLZKXyR-jkAtzqu>Sbf-Sp4eAi=`G)VD|c_jm}k$<8^wb* z4&#*F8zy(|zL=+(@Tp=Z5$=UG=lI%yNKs^jCfedO6h~TM>8Q;9#lT-o7Rq0 zB{#MAFo*p8Zr)d+FQQTC;*pAfxtMa8`zR2PkV91abDb|y&+Fy>kRuLdIbeK612_;Y`xfM=^MXf_b zv&hR1g)pic5QR#+(d?iybvADUVSAmc`#@u9gWiKEmh~is8E<$R>J@#Ta*~}%zhF1n zuIgBw4cF^93N}h^+V=K;g?Vj^Q!i}WRPsCno;Av^x(1yfU*pyXBwG&=3VvTk5Qv?4&86APHu z(ruz!T&rvB5r_krJjo&dP7X~fqh!i7oxU2qF{G{V-64|ckq<$d{SQNRX7ra*AQ6X^mh(ya~pp-bAGW{PA15OXtU#`jhwSv+J9;<1dIxm-l$A1V*Fqb0(+uGXLT_>bQw+<~dnhOU`?r50WpM65b$vL5{6Xd!P>M z{CqZTHf+AdUf8vycEbKhLI=;Xoo2`zPQddwrQw+561Tn}m5dj;K}I6)&~}`kj+n(- z-CXWZ-Cvl)60qkB@r*v%ZrN94wqIxH_akZKCXW{v$e?mco>5I2gT=)%Nv$XCH{ZIS z@c3k}>Z$9RX`04VV{$GIo6q7RzU9hGa#O4vexV4Oh6H)30dAdO74Z1^TlMVk+OU<2fRD0LKtK0#FR7J$pMJ=pv3nsJpmvEc=5YTp+!!LjcK9e5uzF=*9H86Z-* zAylcfGDj&`l(r_&kqV-D#S#MQt7ae|$yKWOLn~EUTpwxH8dew5v>Ih{tJN4^7;nzV z9Gs|nooreA)k5wFLD(uNIkE=9taC?5u_b4Sw4JQ0)#TSy78pK;8cKWBxA)ysv68b4KRhGz)g3*E%F|WTIoUd5n@`AwErFcEM8^Gn>b0z@(k5KtZCE$+7~# zCoLEu8pRohuUV9{}W_Y7aVnNryprS>T zE)ok7Zai0$1d(%A);|IiZ-snL2@I0{A;Jy90^9u(WtM1eevn#7Nd8pP;6$P2VI9|5 zJ}O88Ktu!4I82Uej{H=ZCxk$yaklcJZNTfC_`ibCn=oo2bXu_=Rc4(Y{~Go8fEYJc z#7r{BFN1LegdqNU9(SHrr#tUI(T#&VS-2y+?zYnY5-OP)-34E-zIMc&ZU?=Q@cxAT{h!RTev{DMVVpNNFw6i#iQHEfEzpcf_V%*w@W|kA3TRFB+ ze%J$4E}ACdi)8uw&wH2Hlg$gwx=}H@)$1=9Ylycbn zG<76wIAc8Ldh%`v_+)03q`@x26#D>+lTd5-nx1? zO`#+pcK2KE6G|K$BZu2M;~IWNp;3=J9IGTg)d&g3iq;0^RWgA{ zE#@k^4}ce}dWJ~>u@bSvZw;1BBpdO3?fzAMc$|}lkc6%65G5K1?5*4*dgK+iN`bcm z%KQfh+KrLGB4%D*#o7(B2VVd7WbLG#4FO7glO8%G+)Pdz$1oj4;H z)tA*IIz?aa4aDHgnLnn+ORp-3dNa~==Zr-|w-PpI9>ME67oI;T_I~p(8WM`Xc|+Fp zVkRQ;5cOr7&NlA*yZhjiRAX9iy*2~#Mv`dXX$4NF+f(Up$w;AtAW%J&yPq|gtoFx*OLfkx;|v_yg;`fdFNRKnz|7UKIrOZm(haQGZR zBik+kFf$L-3hdCKzZ1D`lxpAsMT2|Sz(LP@4)wsC)PFX+q6nz;{9`lH2&6;PqEZzE z)nPOM)>`#pefbTm(0h)jP_~3`z2@DwTekibz!qSA(5b4Gs`78;b6u+xn#o`r5ixWp zzi2bcQ%IKfL#8)r&RTU=3f^`+fKHweZWOLyN_^LfGLBb!i&U={hDEbMm1Iu59Fvfv zgNP=%QkVpxVo(xm$AQQY^UCGg=@9vJP59p(Z5l$u=_cpL(;YbZofq>b35QYCJkPRB zR@)Z1w~rAESt}thBiXMZYhmVypiiXLWk7g5L95XOMEbtzq+5yomlyLw9FCZy*&ob; zGM(?PNMxDKc+C%LY%>~c4tkdclfLtgj=ry>AJ3@Dh7nnSk{Wr6yYF0LH zs~i~~+Qb3UqC8;kZ%koaLdLQReFeNEG{+)E0s#d5Q?vcrJsrv1t1Y9)irZ$`oEk>p z9cg?ao=m&`i^ibZJW+74_h4Ac8VR!j%`f%0Cl+!OdbE0`?Gh%ijQ9us&aAUCo%hSS z*?H1S@3a=89JLB27)gN+icfrOEtF#l7UC`$t$S%z2=Jcy<-_A4J@z!F$+zo3& znJZS;Va1P_=XpzNr)jGGInv6K*>?+kqm?Sl8IsywoDNV{^S!qC+uM)D$x*}o0>Ud- zO^p9Uh1k{TzWo2f#k$SH`OKrnH5i)u4*a`=`DctIn!jW*Y#Jua#yoM-Dn5O2hfJAw zZ_8=JAu@5JKw|}~=A7QTtRh*{o~X=~)+XXLA!~Z|)_f!(4u_*IlwLEu4Y$jkbf(Z* zO07oxMkSk<3q&`BnZ>Ez@B!5@I*lu~CKqsU?9e6G@1R9W{=+%{&9|HbGvs?5OR7b@ zZWk9?zanht+*vtH^iG__&U$COP7L*?N?dt)Hc26)H#0Yk@3^N^s015|XlT<2Yp{JK zI5LQCi(?JeZpB-?*+WB<(vN`(+&1B)E>m>St$za7UUmw5eKv^uzVp(#Ruqm~fA#kMXC zyj2X6#LQbPHieNbDF~PtCrTw;GWrFEFTis_2@F;^%0(G-2QF!QqVK3iw(1o*>6rpu8K$rQii5APtJe^lKca$EZzP*G56 zT?7lYvWkHnH~AA)*VlBnb>My5*{;=Jgko}Wo%OUY(b26;ZqPZhn-7PQj&xwKm$s+x z`N7~ITO|(v8YCf6dw{DtGokET5n@O4T@{e9Tsr#xR2f@MmP3-JI4;LiH}o5tk3GmW z6$+3Sk=i_%5<@po!x(Ze38VlE%iIe_OaysI^J)IPO6fd%JIm@@EU^~|Q0)5A)WoN_ zjI`xhlcbW+%Mp%Wf*T)Z5vQ;a$3kVUR_oXaTKIUS7(H8!>#ma~8;F(a>QUe_{%qj! 
zW|P3k^Je>?tlWrp>m~dwYV03Dv@C6n99o(jj0cISe2s5>l&%2^-^%IL=;1}idb!-; z&ZEb!Qu(6?&*-gJBylNLK=nRu&YhG3YpdpGH?UmQW0cFJ^PAC65tXtof;IVb_JR9r zw2T*Pok*22z@o{oP&j{0v^sp@nmbxJ^WjKo*e#{=Co@+1nl)?~7T9pz7_p|{5@-DV zDeu%wbzF1a96o;S)L$Icqr81x;ROLxI@0(7@nhEb{v5FQ-(e;{(@Z)oO?pqp{i9ny zW4z6ovkt1xF?Vs=z8t2n@H$AMu-}mBKKX+&LYr6Ix;IdLZaR0$-9y}az%Pz0mM&KC z`C-Y079X-TX8OM0Y}H~qvb2}nKcvpFk)?TV(m@ZSt~osS;FNCW{)wZ00@zV()U73B zl#&dlODf1}q^nnZlY{52jRI~|D1>2EytI##ZkhwQQ>4ur(Dq7mmn8{2BA%=GRZYWd zElj8|54~%98M!n8433c0^u8p{rxy7Gg*8e4-v=%rBGSL}oNF54H=v$3Ed*)KU6}$OWE7E~hm`fJqGt-=Xgr=*-Yq_p=sSP?2kdlB7zY8rX`_ zi3yb$$GcmGQq!zsA49HtR_lj@sx<|yx|Ll_hGio}NI(YyIFZIGn$*WDL1gqQ59Bi3 zUP7eS3Y}YZ`^712zI@`j1n%G0+)k}3btP#Cv0kuK2@o}-Q z3=xl;-q)fAUC(H5yUlU8-f;CPT`i7I6(}h+^$)ZubyC|~VxRTjd?juU-nK)*2|^Dg z_x<3sRvmwkM~4|}9zGCZnPV*JVR?A=6(pqn5YT=U2;_QEylFfr2LDrR+$#C2<}wOi zbVV(Ib=EzWr~=fg-N{9tMl_Pdhy?CjeRv*TKL+ow$e8Y%gGSWp-6B7n zFR*t;4O-C1A={P3##!6&bkdA4S#zzhk&o2?`D60xkkKkJ@I#L4*&Y2ebF|~ zycIiv#`Y}zjh$8Q!{K*79r5P1PWlg*l;2%C!X|$g@j(TeP0bb*XLS-UvDb zt&JuhR$4v+1ZhgGX{J$;DfvkbFuI+V0IC9#As;X%N`X>ABwAWA-x??qJ4n%RJB(%N zNF`1OUYKD$ZIl~6SOuoi@cbhOxJaNs%qm7D98LSpfZe{C+kb2@InX%wmC~X2Y?_4- zq03PRmMmnmvNWZP>*u+q#59RfIuf8>kLGq3i(>mI?_xUAO=$Q|9dV{QNv+GAI(Kxv z!VNoJ*xI|>A0ecUn@OKE4~I;X@L5AI+a2g;b8D zjUq{*fl5A?Wk#mNa&U9Cm^FFED)21sFPF>|wbFTN+OCG^#S|a3yTJ?KOy7lLSIDU| z9~ei_x=rWL9?e`jaWQiRo*(>R>@t7<+y(0>Ub%^p6rlyMfq>Hb52#a%^{T^Wn#T>7 zxy6~<3~gooeDdZkJw+_CXJSDaFz$C7Hdjd5^_v zdm2cU)J=>i5*Ew`EWb&%-aj zmF7?9QS zZtC5SFef&6iSa7G7-;_9u>QS>l==D}kuq)tf-aDDp+CXAYs}v0$%t5zDot|JG;`*K zlcrfxVp)0Ca3;1gRk?R$QKtlQ6SE9>dA7f6t zqK%KqkRoA24YkYpeuta=2j4Vi0E@z3m!TGFE&+^9Y?&C(kVjft81>zJ&EnZ^{(`L` zM5rnkPPGI=m|B0K@%}M`S|_5Z8^)|pa1+(VT@XbR+58bm->z6M&^OZb+l=+TfLM32 zti2snX`6KWP!tYvE)Fk>KAjMl_k!Uzwi>+hCFq)2ccD>|EmTj6cR!b(9U8jWkM#A#4Nz(`IJ=`C{1O6Kj_ ziMTsa<8@L-L>paGX5!_E8jF*)4Q-*x_!d0C#HE4^LNd}}(Yt9u_fyywj9+pYX@kQ= zAi7+B42GKZC~mozn+h^NQ4eo`cv3$l?ui1O`6uV>#euXb@mU5kaE;DkmV*u#t^Mrj zT^DVm{@Jty$7d`NB(@E&UWvEe_60iP9t!w_Y|@VCI0v@qX|dP11TrxU8`ZRr+9?py zV-3~yzIJ6{DyaEbsIvpWOA55FDN1d3d2VT%uHcElXOgs$o_m_L3a(2k=+H4OgS1SU zO8HC`>5%|rL7n#RjdDFeZ>qNH^xTI`%SCj!jRSb6#OX1v`&D6Ez?1qoWpM| zIu(ir^?xI^vnI01bpj}*aCQvhZQ40b^Gai~EQ*9evJdfyM)*@8=$cO+-mE9ILC7jn zLSZB5SbkE*HuQJOuaVZ`YtuyHH|sK zX2=42;G}I<^9JmJoH?ttYGfV=k`VuoAXW#TQQaI@3fo+LXm4!N8h zT?f=8Nhr5Tecf)~e)Y;jdcF}g7pYcyN3UIt{N;0uCBm7i+&L=#WjG5j|6%grX>xk| zJquv%r5!RY=|f(ceJs4PhDm;391wx5Uh9n)hC=R9M$6n_axh3r_GyR{%t`1b4O-=W z#0(3g`ve5pdz=anilM!(txEwgA1X^ryq#qTmgOn~JUwUXvMD}bKmflHGRYKpwcaET zM*-oikTe-hR=fgdJVmU9OBjF!bBHbR+92ScCm^z6o~5GqKRASx(vs(#sTRn(0c9%| zOZYT2i*?*oS5ldc0}Ta-A-ay6B+2=>)&-;X6(J7eD|V9ow8vaL=tNb z*>F@X1t#FXcNq!9oQ4R?aT8wdg`#Zc0|UhuS?0Gr=8_J$84*JCBatva0xmNV?Udq{ zW|#mLir;UUWgZ8Vk_=`g112_SUZ)HUfk+rA;dOab0`8)t`z1-(vg$0uStd)kYmpP+ z4FXRobdmd6+7xnO^f>P$cW)8ph!`3Dj+Yaqsj*p3URnDK>bk0`ZxAX_2qNh@C+Dfo zN1hbGRw{3q=c)T+lyiZp7etvByBZ6%CXTbcWL+mf@w{*BB*@)c*Xlhbavg?g(B2zG z3KoP&rzXZUgf-NbA#~kO-BVy>tlNJN+ihd-l5TFM+%V!7AW$s%x^J;&(=KHtD)IaeAD09|(jrf&SQ0w)% z%~$kXtd}TXf;Zi;2D$N0)kwVygM@?8)_Px?;Ch@_Zh)p3HK5 zfmW_|oIC|jpuSZkqPg>e%>0ck-h6AEKPtzmwU{21x>K4eQV>D=XSyymM*#tIr-XDQ z2ArLGOZyd~*=Rq3<#7wN*T~ZN`{Kzg%Y;W3bXE{gR_mqrxH7L;7xf>9o?iMjD2&Rwofrgx^pI>c8$N0t;aXg#a{ zxOV8ZghP6WWgToddD|^y7l2F+eFH364PDCrbYbFKSmuEiVjjpz3@7u@toEWS){Rx3 zIr~Nl-1_zx=(o$AR`_SEH+ooc{T1f%%{=Z9yrq*@DRLvP7}?8D9;P5qaZ}MstP)6^ z?jJ{#4u7U9TvF?;m(ys>w5r;dj2a-3MRf>BtYu}yBt6s%?NOeP>H?3%{{Ww9`;Ry9 zvV$WH3JDG9^;45)9C+Y)GRu#7?CxO+9=wrs=iZ0yyIxvs$(R58Ib`1$+f&vu_?aLS ztPvUu463mJv?VIu7d&ZFx{m={hT#I5u@wy0XxWiKjy#wo?e&6vfyKz-vm8izr8xC{ zEw{|ajtH6%u)>k?Nzsr+w4 
zi9n@RloO{1;8)Ga5X9<2V|WAWca^56G2C;)eXR68tp9s`mD$kr1b128zm;wuckS4@ zm<%eWSwefYt|!ueJA+(L_!VLZ7%AO^U(j3e0Cd^UZ?K=*R= zqD_ikhPyHDOrrChBIsMASwHTC>$SadrY)FN2rzq0cy^bUOYL`P@uHm1w?>l39`%t< z6yKBS4dNG-Lewl#petAKQE~aF3td<{B=cn4APi3uR3C|W%4Cwq)_E>inU&}?Dbmtt za@~9hMd;jOrR+nnG8EIT4n6CfsGYTz{$d+JrdBMLTvh=Kn^*qo7%anI6>uZs zMlg=Sfo*roG%w;X54daM=#ov>4d{f40avQb(rDhM^1`NrX|Z5%)xBrStHU2K0-Tke zAW3IH5ZVYHi?#B(Y=z3!sG)fejCno4tubcAzgaxxk|F_`K5Qc`?ce9>(xUTe+j|1m zi&wht{K<1kD*Cc*eBgu!%Go30FJ8}lXy;UR~d_9|f&+DWE zFY_^)Ir70p4;;Aw%-AhCe&M_g_01a!kN>X=pYZ-&zWBs*cZN=>XVEvnlQU-y9UC(G zVc`o&ZJcAt(Mp=}XPy{r9ZAR`9|HXS&)AT~yaNsFJik22u=!ibJ{~L#t!}U6jAmI3eZjefO(lsvq)@Sq zF4Kb!sZP_1C^mm~ekT<`^qw|e3>fWv_Kf`aTM)`jy(7tU%TC5jPcLD;!?t+U1L-9( ziXC%!(tSMAX4|y0P}}pjWG+nzp))?Ifgj z>n@BpM!!>ag8v)15s${GmOnPnS_C^?NmHisX}8FAERahh_m`BFkJ_S(WaX7*x&?H zkM?okzQ#<5(MTZJ1Q7j(OAv7w_;BFBM}hllRzBj)SZPg^pz|DVD5vI8cSUAOa}N5-0e!q2_kck(Df- zd?gt)XLql7|M90gALOO>{(Q?)x^j55uCb`5n3oNf=AAU}*;C3TdHjF4uZfhiYNzG_4HJbxY6DJe%`8BNbE&@7O7>O%hY6&h6R)fzuN} z;!OTvB3D`VMLdCW7AR@@NeIe8&JfG8bZKe(hG^Yx8$kyrNX5Hgz#&RxZ!!4S3~6S0 zoX)bGR3qkv*-`md^n(aNqnMi<9e*P5-Td@iz}5#`A~_<32};L5s$t?XAclD!csj?{ zS)U6cmyeJnVV)F+TP?48vQ*=1sBEUn-3++_;}NqVa%mabj7)KpGkZR zsvhE3c9KW=I5lK@iF1~Fl|yu9%7+t!@zLnoonBA$$b-R!p~OV2_bz9cNgafpD=qd~ z>GgAu^Bbhd1A!H6+~a4K*6`x;`%=YlD@|=eK!jc)Mi$=_;MuVed!Uz9Z=09iHy zRM8v-`tpg{(1shs^I2vx3@A4r%}U8*nkQokA;wcPn&Z*-aK+f|r+Y-<Fr>N`X*L za-`96E2DWj0rh0=A+YSaL^NB9ry8vB+P52(dDWkbsY0k0s@%S4rhy$G2v z@)D?XdO^B#WRnmu3k1Cp)w_$1*RH$c^`!X$Au*S^SXM=thfI0|Mnb)t*1ufcVb7^H z{dB5_2}S^6(Iu8~mX?rE`GQv?G%L{t9gXQY1wWjC5WP3>Z{k)8Pvg=498 zo(b@1K2i*A^I?T>PVW{IlIM^0)0fTapfl--dusXV=U=_LM5VOQeDW*eJeq$v64iDJ zztiOTV^{68JD#q#$#)$+M!&sIkD8F?vuD(x#>FL;lC~*3Qe&^1;^y(f4?M%C( zV_seh42kXf7S9I14LHc z(3r2)1>BypbfbIm8wYnFABZx?YND|4yZCB@kAXSoaNtwIP{-P2y;m>ALpu=u{Cg<0 z5so$ndSwV4W|Y3wW@Dlz#e1AR&M#vxSz}G_N22MRn*r$0`Db{5D>5%rTRAtr=c9Ea?M5y1_}%h&?v10%sN0}`$lnZOE_;1mWc z#Wg4i%(}MzPsvx_ib?msjKRO$-}AC~&60`CxK>boO<6}?>QshlWaYhWAsY<$f#LFH=Ki+c)>m>h@C4F>S z9IL2u_(^&CFReF&XuhU>q;v-#Mms-ok|L7sD-f*EWS7-l3h!`4eF=RT@> zC9N8yDp*2{E43I(g5qAH z_-m8@E&`F%r@S&vO!D^iNA_O9s+^t4d1d&0fNiMf3O+m{SxTLyvnN#xJ#KJ8ti%KXQnd~~uga!2wviPngqk%>R0NLVN8AzdWEXqk)B ziL(|0Xs9Eo58@SKcQ2wKNC@)-4+ctz$W)LqS#X^sN#?cakBk8@RZS&?>*{nN<(^^Z zz~k`IuTGAmxld7UK-!+kRk+DRfe>TUB_gPK7OV6mGu($CISe0T;k%U zmOzs?*ZPP(%a&wWPHRlK6Nxz3_KAKZASiXx<11CtIHfHNSftt%0tSH2k3@HhS&^3; z!hM;^=h^90mPKtFDb9=o$R+Lok+iJ2mrtJ_g@l%i?mKbzMC_zsQaM?<$4^6{8r-L$ z9^(=!8m41 zk?YT%K-+;>t147HNj|UWmJ<=cmQG;~$N;%VnRB#p&Sq(ZkqGJ8l}O~CYps{L3<+2D zQL?}kmsk=lpBIIX49>P<%r!2&`9K+crzs**UC7x2LR$QI;-1X92J)=|hbFD+{!z zB^)7Gl%$|J_<%CA^T6vRBQB({nbp0Jh@j#~SfQB{rhqGss6a^BywVh7N<>p2Jc!dP z=@Y{VKj21=x;aV&nCY}CSoELS9q2NbxU#_tizkGp;D!;TqXGrh<|L2krdSpHBO28i z0QZd{_tOFykX7@{(uGk>mrYfBPqKV$lu;E8m$r^EY;uNJX(^O08XUo_pnyE_07|Ce zW{#(v;Y?tcfbs*L^eQTx?UQM;rWJ@%7N&!DtDG=~CvqT?5yPZ`=IN1}#C9I7imh|< zX55QfrPiZ`l%cz+&g-Ev{954`2fM^2Dv}C7=t7rk1%DLV7^)*a9U+f0|798V7~q=o zg}MHb4iRbhwdcm@CXzx391$u$6hLUb&qUB5ocXlG zO$u~h^YZpW0@2>i9JCzc*&@rxOgZwD`wRQH%XYF?DG@Jk0lk|H@iI4IR$_j|U;IT7 zh=On>S}z%8SH~O(63a2g1dtuN2We}Fm{H4jYdcnsT1$G`)BP*;QK?yGK!s3-E=v`- z>#PU4xIHsBa|Xo9nVC3KzZRs;5Q9nKGvXY*`ue)5S)m!?VyC^D5X1e27}c1#B1WJt za*L|wK~HqmXWv1X6O+n8lejCXk*1hiQGE0jDmD20VEILM&P>`{%thq`;oDFAXc_!vocD)#8)9>KiOL z9W`QL!$?~8d@FlMB28khE2!Sle)rc@vdQz~w{t`GfY$736jeK&HnX-ih-TAAIvln) z7%Tgvo)lxCuLc4H3(7b zHQlamOOw7FE9U))TZ1U6q+DGiww1gQRVlwmG?IK&wXid_JQ>Y{dOTli!oM!JZ;}Nf zrCS2<4S%J&^`?ESr^V@@$|Wfv#PV!4*)A+N#R{$vjaL4v2TMI8O?%Nav;BPgi^rG} zB^F0KPW2}`8P=ff0zg~oW`F+~CHdi0mSJvYc;ecbs1RN^#97-wFUy=~vsm?TG;~WQ 
zjFSWlWR5_}laN-4HlbG3nhZ&=K8L?3U+dXH+j(!==FX&M?qCr@xyj3y;y>tB9(PO=YxC5I$^zjiPJ;%Wxuw1UkhK`v|lhJWv? zy}qyDtCqeG9e<;`-zOG*nRq0;VDYMI?)I^9UuH7TzDCLtzf3?`O(kPZ-@)w!8+Y^s zWqw9d$JG3aAb;}jd&T3E-+h(QC}RwVk$i><wvb8CL1p4Sq1!Y){=1Yru)jEt;uxc9x zgtyQaEYV5C-h<^nN@9vY#Vf6?Cg@|p6}5XV1$qjbw!Zx8cIP(2x=dYbzIEYXdJx?FkgM+R=&o7c`|wgi7u)l>QhT{P8|paSc-XJM)6W#$!4O4V zF`*iQRI-OHqlMVLSQY7@ld;a-1MLU(rPhk&JC-KK5M-~URVB2#&z?=^mc1ePrA|p} z|Aom^+s_p`L$FP`Dt7Akk9u%nGrG3?>~6KWcLORpZT)2w$7W?UF>XtG%@qWL%HdL! zPpuRpY;uD*5DGlV62C&P1KQOSLAS5lPVSfycgD{xY+>4Ve3J=v3PA8WOoyzxrW z=a%mfAoe=a;;nwU*h)HwgL}PMW`6Xxx1KzD6xreL+wJ@N1C;~zjT4N#1n#j~sglM? z*{=Fuo81jMT#pPYqq(Gi>BjU^JHavpn27`r<;=e{TCK_nYvgyB`!g zoqL7zyD@023cT)Zq#sWWH*6)FnN{iZUe!9S7j97$&_Q@59dt9y_v)4D;qDnbRpGWp z`;5JTL6S-Dr*OZQZ7>Y}NErFL@Me{Z_P3;&ZFQuo7ZYFvi z@|DX`I1cfXCQE;kfalikTF(oIGRhP0*phi5CN6YVJs`z+A!XqBig_%BtQ>J* zq^A1jMU^GgnQg0{qey$n<<%jm5HV%j-)lg!(cQ4~WM%snDOtlAZWg;)UScBADP^}1 z_-4;qDoJm=r+idNXlb?IX=+%i7pJQUw?>Rou|hjtGW0d5q|X};b39zEgve$`TRn;B_vH>a2J02r-fp~IwxAc*pz*c zt7K2}lP^Y;5oP4+{2F2UR*Bbp1MQD?iqjS(jgxm)5$xfX=j`m9otP!@tf!J?#UxeQ zk0+K#CPMPk+$l#5!m`>qfAq4lh;V>RL_Avz4-46P2d5<1Q9@JwqbeRlQ<9hge(OT6!VyL7#fB z@4MK?pyC&GNMS9|&n~1_q>H4N`mgFy4Er>?9H$q1U~Kf&xzc%GM&J;CD7=L%YV~eF zE_M*TmBu7!2}Bj|*0oOfDw2C*l*M{caaqKE# zmC_{RhE8KIvbKM?LW7;%a;w`8v!u+-X10k?vHWVaI)q0c_WRWwo5uyyQnBrdIx%I7 z;ra?T=?t(r1#Ls>>lmd_6}stag3#%RsbS-`*EtbQCr@68CVPiRNB26tXJ_v|y_VfG zO|AUiw4UR_QG(VDGrkWTrDO@6M@hkPD(e5mQk=YNRLkl`P_K1$Bmzo$H|RiKkrj$j_rR91 zX4&eAfx}_Qqk9R&IwFhpn@i1$lkQO1A>m5J!s(=szIk^$dm_?L3icjXGBZ-#xyN|}Hp6JT- zaox1CYuCF6UCj-PFzU>geP*Tn{d)J;CJa?+D-G)HDk`u18yb4N@AL?A7i*8!8uqB9F74$xTrRdYLW_7cj?R==jT6C1YlR+qm)4z&zUCUhCfPeNgX>*^4@)_9juUO$Cci)dOk=&Sb$V!&)B; z{=ck4dt0eJWs&PKDe)X`@`58v)7)B;`(j*s9iK!z*qCKPPe$Ov=AB*} zYxe+P*ZrM>kzHhVtn&Ti%TpcOj+L!>pKpRYjOo;T(}h)U z(e{WW%Dj0Xz1CbYk|=BSN-PNk4soPmugV}h;)baN`Fg5Y%p!E8uddTY4KnJV)(ocM z5WE=~AxZMG);!+5)%ZE&x zfeQut9mf6F2#ZlzGLwvwq1Mj-H$$ zi*y#`ic#&~;MBM2zKZegneINmYRT8mbUlr}cJh?%?m|X*x7doMrVTa3s25rvdB;uY zWkkDKJOi16IcY4pL!y3=UKU0}J6SVCf3;%e)kz6-S@qG7uzTKqKx{=!a!!`4XK4yf z$NK!imoKC3d->K-;9hUrR&2FXSKR^A@liM8YQq`j4{E;*)4#lzWWNd>kgvWh9Hxa zZ`YDzL}V_d;aYFjzrGDAZx`J9XyPz~+^}U2`~s2#98GRW;JeY@C4smkmxsNhUC16A zLFY_czp%)#CA}Um9E^<-wSq3@b9l`h=yT2Iluts7okf^r7lv(98vY|G74XT>+jdJc ziTgjB@Mn`Pp$^3GwD_!r4NmgY=49w}85vr?kR-}Z928Nkn|kf8W8NBIfm5$fA^g|7 zf$BYPD~t7^eTkvCqt4sK+iP8~VR0QKlOI{`P2IpG1wtl!Ob!Ay9sWx@DI{~5A@q!j zR-mIoVofQ9Mx#=^Q^ZtiegEVOs4xIRDmd=|ry~r-kx8{a;VF^sz-kfKN7rppRYLANfAs!?=Dg8+<>O(KsiC`0f@$Y)v6{EXpYA@4rL| zSSv^1ZX6v5)L>V_QF}WQKThN5#5vm9FmF;XEhyCp<+n@`{W;OLD( z{%;3fs=m~C#J4>=S}fwPBOqlP=p+nKu~PaM8<{l zlA7b!F$IoQ?nFc1Oi8cO|EK8}3w6#RNqSXGQc@^?e_9?2=AjfnV?-{Ov^Jviehmf38U2N1>R*pxbK1Str&s|vgT_o3qd&v`HDl`?SXaM~L zAMghSnJ$@kSic7T9~bt&6ug{2)08<#Wz2}9ib^EInJ#Hg(9k`h_*C&S5jfauFQF08 zTAoBF@rTDr+BPltkypj%le1gSw;XTr^HYsAa@qGJWL&fBLW5(c zz_?XBcbTI9p{3w6-{1$94&pngqJ_!0Vi8PE% zuHHpcHAZ5>un38&0ie@*%hn5VAFVKOQmTyy-`UFCHvg(iCmcr3xJC~>=yJC74RpX9 zyAC1ha0LUt?Qm{J%$5s@MDjzzxgLS$4nHU5;BuQm5aD}TtufwvaDlzt1IIM;hc`N? 
[GIT binary patch payload omitted: base85-encoded binary data]

literal 0
HcmV?d00001

diff --git a/lib/font-awesome/webfonts/fa-regular-400.woff2 b/lib/font-awesome/webfonts/fa-regular-400.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..7e0118e526eb53511cb57e7cfaf515784fee4345
GIT binary patch
literal 13584
[GIT binary patch payload omitted: base85-encoded binary data for fa-regular-400.woff2]

literal 0
HcmV?d00001

diff --git a/lib/font-awesome/webfonts/fa-solid-900.woff2 b/lib/font-awesome/webfonts/fa-solid-900.woff2
new file mode 100644
index 0000000000000000000000000000000000000000..978a681a10ff0478581436eaca5c5695c97445d4
GIT binary patch
literal 79444
[GIT binary patch payload omitted: base85-encoded binary data for fa-solid-900.woff2]
zi6NlbOFPGsxyO%>j2K7yH*Qp``~Az0SgixfNt^U3gSpH7mscK~h#`v3k$B%>!|=1{ zi1O1zoSlz1asm?Lc%OayX?Rl{7`4C&O~lXG1U>30%0f{AdIxrUEd}a@QQ07RR(N`P z_&v|apHbuL>v07YxPty_oYd>;(UVhK{S(gkty3q1XlL8XZ~+Z((1?8ulc^1t2AHfl zT;gC@xM4y}E?F@}QfCA2-^negvNc%X;MRZY7nJWc{sn_oY-zB)Dh)P^80`7@ER2?g zYi66}&4Zgyo+8(RXlrE^^j%!~eo<(4P>_e5oyyi?3_9|&H-YV%KcrYUIhX66Ow~z9 zYVQ8_5dT|sa&UnfiRT*YM~y_lqX0iZz`u24t3QZ`M8F$A8zjRL?Cr%Z`u~5)q4T{G<6PI7 z_AC$7nS2U5U1&x~OClW<9dq}>L+Z59y!(mDs2q4qHE`7&ZZ?-1+I`I{FKxpPH_-c~ zFU8kiyV+b2KRd{faa3g)rtJMxd{$OG&@DpIac@O7ZHw0FN6a}vVT>{;D~|xEAU+@6 zE-ct=_Sepp?O{!&&Zp*maZEM_i;Y%mRchL$br*Xn2dZM)^Su03o)rCr=ld^X%*_9i zq8Uhf<$0pc<_B0|8R#K;WzhY(`{3obJ@=#Uvyv~Zn?OC~zTwQ%v!U{MA~v7wlZywe zeQJyz(@Uskf<^bLM^tdeDjskl3Y>_1PLc085R1ntvA9}xAK4V!w;9wxn{KyqGh>t? z9v6!yltG=rA^U-rkrswUt8MBSh~kh&bxLv)UsUViw^`yfre*t2@E`v!LAFq?HI-v6 zXtldxMYNjMgDkE(S^$Noxun9R7Q|?kGsOrF`FA0`m_uk2`atGD;xcpbrp5t^`osafvmtmr?o;TBI#6!#)b> zw=gYphfiTULMKmCKH?^E59eB5X}^U*mTke}V$y)yl}Zn=UVY`BHScSFT$o6 zmvOyEcdl?3hv;-6VsBv(QQVw1Wfml?XFYCCTCla4g6F1$V|jG()`dxP9V6czqXWO0U8fF%-{0Y8Mhlt9}2MNgGM8wzRC}we$@C3clXoQQY>9OSBf-e3P0+^ z?P2yF(O=lMAgTE=6HGVTCZH%>NCo)FrL9HAc+r87XTZ%{`65@Mc(-5&Kg@ff^o~K! zJRpjPtP%65q$PcIT0FVFNEapnfSg#Y#Qf^bUW*zpE_@vWTCxcEep_j(al=jGSf2|N zkl-YC80=#wve>VB{E7sbF+?{2>!u460;!l{)xO<3FYUbi%K@D~5GP9Y)j|8rGped?!|_-p-vy>}wiXqEpGUpv z<7DADJ>1o*@T(2|>oorC{DQtikIbR9^*%Y@pQ2!UOJ83L;gW>}!Q4y_wF^SH$^SG`S+XCh0Ey&B2QF0y!r#<^TJ+^pSfjA!;dVsygazq4* zy_j~E3h+eQuIOy1LYx5-+b9O$o)PK@LqvAOo!RIf!hmLRtg@py3yHJn5(7jVe*E~R zM#G2icKf7_i zQ1Dw^O1aeFE~5ml8>G=w8FfozDLg?Z3TcM?#tm|5wsJY1{)Fo`pXXpJ^Jki8>iY*g zBTu^Lo_p&tLvs6v|AU9yuU~H;4*vg#+maa`Z_mx`eq!iHK>CAIK?}Neb!IYczZ{ruP@8Ie{`KE?u~LBD^qm0^+-`ed&nDC@>(QQhShDm3Md5IH zubqaKgD(c9@u>n*gdi+k@&SY}n6TG^j~Wc&tjSrilpQ<|g^q+mR(VeisV+vFnZnEu zBi1!%uA{8p-ofX0H1HGn|H?eQ8XO6Z2EO0IonU9O9cW%4yfH!h;3f2=fhSsO;hj5z z2vvmO&fN=C-9&#fJ-Q@Fm7IdJT5%=Cs&nV-QG14Kcat?(O;Y09a9k?nHcm-!nu`Cu zamXBmw8c{2YH9AvjBff4lYiYSAdXC4X5nnY`vaJ>MWCvA*a=QP-36${Ab#2sU z!kdLw2@C+#iO1K{C+NKZ*N&56*d6x_4LXo4GGY^bF~oQ90dmR6urMsjFmQ?@si=@p zu8nM+JvuS4iBZdTkS0hCZ?Ib^u{?D#3yKJt^0hJVVg;P_g>w13QcxmRD8wE)49Y`c zfE5-B?-3rgDinc9W+5Yp1L7>6T{o&!j<&_;wQ$s6@xyX^^p@)?oUI^UrBJB!*``vK zVW3;Cc2m`$c~zG`DM5$w3%B=rMx>-f)IrSC3ynzLfkg73C{hF^2P*9zBAbW?F8XKs zB?W{BTy|tOo!X4K@0bZtG`at3<-ySO9Ne&_7gqyZ$lkrqwiZoGV}sL76WMg{N)g%t z7@bH_Pcu8eMqQn4&PsLSS51ott*UGdq)^N3diGe3qV|z`?-nCnNfsRL@u0X~aNsn^ zym|8;wHOAu!R715cSbd-YWw<;{)&PAj`dEb%{qMYZNSu1(bNxjx1{N01n2rpG6)wM$jRFbca{^YVewW(hqG4S)d+4Q)xF zlOBH3}w+P}^HuKB!+`Oc{r5FLpp%G*eq|Gs^4X*#%EuTeMcpeH^ zqF{KNPmvGB4?S|t(hv>c7cZm|L)OyT8C!=4JXG{0@GAnt!HsPd&6}*ZIC$-kByRs} z5B9p~YzV-J>Mftj&zs|G@|0Xk;60<$Kmbz zjEPB774sIf-a(tN`}leEq6K>u00d@vzsU4@4RR*;>g5}uNts+a*6yvvd1@GVSlW*< zXgBwodwPgq?~zvn0Zsoc<4t=~QuY8;9hF8b`Kz7doH^6U>B`QYW{5p&GzbhvphNqh z40k;*a)=!i#wY_?W)cT6Cd9u-sCqf}#sT zQ*RZob+0*-RV&J#L830`N=^6!tixfT7<3iRE+9kd3Zgo*CrLYivuL4V2m6^Uurnfl zDr;?RZniKiviF}R`CiPLh!#nJtMF6N$jB!l-thE9OduA=V7h9kB@YVKIF9r$?uoC0 zIoUsyW11f9fvv2KT27ESN>3+j!Pd-&op?|U-U!v77-nfvAVRb7Hf_wRyy~618sPIn;{@;8FC=j;(2>DTG97=f$*kK45lMW58L(?-Mc>8b~A^7>I>y;UCjZg^bKYyH3Vlg&9E5#W;^3(pxX(!Rhd1{b}WCu$Sd0P$yxGYFLQnhCY`^pLE5 z-Pb22^r{d<>dR)uvbMmmE=xASXfjX*c;DM8&QBZ;wjkb?mViLuN)}NI~=9 z;~32e%zW|X-9yYbKUSntuQU6N6=9*vTPjQ>TbYkTnwM zJz|Ac(67eOV)$>77K=h;k8oQ?kxGAaB9+gJ!+# z<|5C%PE;-#R*_OKOjK8sie!N3DtNeBQjcSlj78j`~uQ-Qu!er7WS&ZGFSqv7~Fd-%h z0&qe-4fbwc6>S&(TwYh3X(l^$0+9w6Mvi!bO6l~G{?XJKvJ}W zKgOGgvq&`QcC8L1N7k#6d4ntl{~+vomV32w`sL5hEL~z?982n54x7QmwG;{8_x+{? 
zo-Ri*VswuC-aD0m;?NTa{d%Oz-+yPI8Dy*%D&^ctY+iWbRt<6$Fq?H|@Gx!qtz_3G z3BG#MI;sC`f9blVM!fOjD#vc^m#lPc;%{0n8qF@=|AaeSdeq)=kizd=M*{A$PbDBA zxX#U8!Sycm7UQn=&KY16D3g>l7kLF=ma#)YW0E8R@rQyJ0o1$k@#eO6D|TdgrSm1K zvaeR~l^oQui|8L!(D?MsUo=>+q|(bRA;RP4f}e?0$!tD2;+H=r(H3fg(%KWv!X#m? z>d(5K<_b!bp% zwrGEP95*jopM`aekRMu_SvL2oLmj%NsChpg*qrWIB!!wHxOroAYV=G`-G6thXhTD8 zd$v`BDs)5`>KaTh5V3xJqxqNZlY0Fb-R>V4HH?n*e*jp)d1%8H-#z!~5aaWP#L5$H z<%j^<(~TdmEdQ0O7?u0$^0jl9)i*q~Dbq z-)58Lqi*zx@aH*#IlXfPISKjdrr4I47Wk_`>AaSKxkVaE{nmFCRRe%c+rKjKCV{+Q z_E4EsgL{Zu2p8ZBxgX*3hXNyDSpq<$%@jd~il@8d46@X}!^A#SACq1S#!Jy^B@4?N zZ1*)7tHppojn7XrdwcDf)vMP&Fb4I~Nt0<(G&wy@Dtda}_v6L$=4-VLN(HbuBHne1 z%00(nz(rKHOKMOc4*9?2#j(%bgR% zVaRAyxuA0^XZLO(j-gfiW1~S4(%D44`6Xm>GHZZ56~7y=?v?Osj= zotgLhA3M!ASdbnWauHu7~wxp1|miJ z=lDlNhE`#q9V*E0Iv3%^P-lBnCIa)K8a(f^D?4sdTMZtzX7td48*D&!?u0k+c-QMo zHgjQS0in)0SUtReG(lQ>Z6BMV8HBX-Ag9efFS+RF!J~%J0W29uYzzH%CUPvett}U> zEroCHZj(r?N(*K(ah*?z)S{GFEDWHu-_hNFjgl1n>tC9<<8krJUk7CP2b{jBW+r2j zlPK5vJB|Z(>}G1XUI3M9v*C}jq>vYvVKY_>p;i+{qem|`F{(^ByB%AT^)SGit+2)C7-UH5tEwFa8n z^XQXD5-1s5#!^b6i;OYN)+V{_5i40Pw-RCpsaqE=0<;(4_ z3Yn~mLm(6p=;F|^E!N=O_+({7BVY9#)Fl5+VW|3y=97 z3-H4OR_3v$R$mJQC2kVO+HpJpJO63D)62{18s&L+k+Q&VpD zTpnUnUSe8eGG8~Wa%NaD=}fLVnpQb{%YMWKL&pALgoL!j_*i@x{<1-!?4DSB0zO+( z&Yxz;DvzJXCc9QBKyCLWv?p^Do$~P{fdHEN8iwc>D*%x_?$e|BwyqO4Loku1pN5%c z0b!4=y6K14c*&pWFETqbE*x!>9NZ}XpI1Q2Ok^wiNOlPlZ@M~%6g?2`=lyEkiS>>V z??(b!0hh=)0F3&K`ztc1#qEDoMVP?VQvmbN8qexFop)&ac32eLkjM+Arm0iEhfI}9 z)Bb-t@Q{JGZGQ=Jd;n7uY0(I#U_2an`sDa;@4|zhATK5gP2`E;GKX~j+(UV%yRyb- z)WOuAt^&6T)W2?SFW0ERWJ20e4#x-nK*+32S6N?X%kE)9$5`KCcoqwue2oQSY{_b? z8KVp*rsxmS0vt;Vr~`#h?rlJ4dz+=fbpvZdQ3$`x|7~X(KeQ-@l^jx3#_#+;mS0vB zVtiiA+uipIDN3%Gj;Sj>lSU&n03D%vj!M;}slH`q>g=-m(DGj+iz18HwG>k87pfOL zOlAR@Y@FfH*7F<_Gh-sF8nZFg86ypvA)xz+DZ~s4|H-xX;;6mLXqS8}{<4 z3Gt7070g(+ilE}~o+HE_&nXUX4EEM}$=9!wNzy{XC*v)81>?7Wf%~F~2o;;`ZpH5@hS^;)4!abzul(|o^76|Z zJV*%BtugTdMo7VsN`jLt=qWKIjm-Np2bcjh$X%s2L<~9MfGRB+Z=vxw(T6})38zV* zV!>=Lrhv)Hwp!1}1+)I9QXaZBpnAznOfSBa4=J=SSj4}kI@n*=1`_=dwLZP9=Mw!b z6BcZLv)6ZTo$8i7Abv_{Iz>#NEC8l7v@%-0N!>oTa#>l~zN&A^o$P~+c?C<$Wf2Cz2YQU1<2wM&&2^nGc5EWhP-T^ z)iT2Ud0I8%aLsJh_W`&>SS+dwt~Wf)IOKgf!w~yN-kFwc{!V<(?zlK{ldyBQZ=ZpJ zQ*zu4_bPbT4P&jOuo<^qQr#Kry8h32WWjyCT7}uD?S&4rMNeCEWaaLj+y*u_3a89M zU2)=KH(VIXC-d3Q#>HXvuBCV=ow27r)3rFIa6ep|g(%yc-^>yBXT(gj1|8?4g2gI@ zN~h+uxWc$19hbWwa1X~N%Z3#7-1TFmyZOWk?sHQL7xPFG z_2*S+UNScnPxN(sf7luyb0I=@p75E#wuirprW)OFa=x%@nmI8a?%nCce6E(bXJW>Y z67?(efu1=hq;Zn|@6yUaETRp96(Ev&g&2 zw_^@H2M`4D8A$fdIK~~?-aFVC5Pfk#h01nVRs1uZeD zLK`(R$F&l=#UVf%$elq9UIffuTO?_CO8Hjx*J^^wM=jJgkji$tC?JLH0@>^cNM+`n zXN-Z}lNGRp-X;>lv3R=X&l`WGWYH>aJx%dPK2L8|2*{ORZ&YN}XX9eiez<-6hpCf| zYpwhY_b5n@GQ`}0S&daWEH=clch3({CE^jrDA-Mez!)U3uVVpV`NyjTd^oQtLFI!a z-7n`J2HCAs?8x$N=PH$7B~b*y0eHQ*v2~k;VBO6WCDJgzm#t*H8SvNGj0uMuNnc6V z;Aa@1UZnZ?G90-hLSnlaw1oCUy#VrNh;HEsLRUo)Uw&jtK}s1^mXbmwKia^SfD?j} z9Ze372d+R;nNDIzpiWD{{7(7uK~!L&2gF$4CXo!2Zs~rj9t))l(mj+Uo2b8kR#^a% z^r5Oq)@t3$Xu$uO1vU}aW%F$9>w9(JmCzTvtvdCW&Q*fRlQ9EHd)T+R1tG-StR7fk zu4(x`(_{tSNLYO5+FEFWD#STA(GWX3$ZA^%21~Q=?0(NcB71mRzwal`l_e{w#EF{V zVF`wQ*M=JMa$)lZmrK%22lwl8Q}P%6WX$@BP;=EAfr=M-4|#)_C-)6_O)cMRuE1xr zWT$r~4UTDm{MbHrL?>~!UO&U%TdolL^KE26uT-y|S1wD=N+VBKJPjP*$OwuKQ|Jsa z0ZcrVM3~^ZwghR1*8j;%sWN25nQ0>@nrOzYmC|LGQ>=&0OBc#a;?bE@*JwUn3PR>D zSYWqDP60RAKHZ$e-EXq#M$^(JPHxhP;Le@WTW%sPZ4?|`lamZA;7qkF7Lw&CsVA;H z*bC=nxhbn+SYKK6N7*~H<`CSapBFQ*{J`qSu+ogzkIGGMkkFD`%ZA<6j^`^Bf!Mu@ zvKE>EN)qwrxY-kPf>;famz1ffr;0N4PhaN%~B(i;*|#s5Gwi z8JA|)18#^ofb*QM_Vw@BJ_(_Fh^go30iqTQ1l+o}ZdH*gpGg5|oK%jB8?NhEm529? 
zV3P6hT0LwB50M=Obl|iJb#c0qk|#0Qyi%n=0BSc$0OK0f%s5clS77(y$7y<^D+ojF z!tFiX3K3m@&%$t%xcZ4y&4VsA zyp~ig=~?(qbLZy-CC>?rh*a#>WqQ(sc|l3_SVHJD&YO~L4x7yMuZhYNcujfXcFsr? z24!B!JU4?hQ6#}FPz9ljF61fRLjs=D#<3Zur7%p|Xf+5H1)ri*Af8W#uBFgI0ErU- zl3RWy0S4Say?937v)?WW1t0HcMG(aTr{2Y43<-Uei8yHVj*U2h_~4%N<6kQyJY0-E z=@I8N-T@JyS(05@=W^|#2AGXIGPT<10|I+JnW6O>)Sgiquv2w&kUn@QXu(a)WK!2@ zbCPCGlFhG!rVzZ!H#~F~mA1Nml-SwxKz}l3@w88ChDY z1F7OJT?pwqh35Q#b4A%4ktu0NxTuJ#6CU}(kLJ*^B2)c(i%ju5$RSu(m1o5#q5Axt zziO&)u;(t-U)9+xN`cT3*Z5s3^-lq&)u8hU%DS9f^9$1(`2rsGML*Cjg79TY?xKxR zcUU8GFMSB#)#i$rc^I!~EeD1i{Q+QJt{QL`oO(B_+^f3M5gpQtISGsxzNnVCJcPmirDLg7 zEMN+L$y{!`(E_1=*2Ra;6dB+uRM%mzK}f?>k)JSB@T-#SC1j>Y?+1 z`JCLia&}9o@7n?dFphadB^aJlZ2;4W(hkr0)w~htjFmrqv`hvV9@=7}&H%&pkrB(l zz>_CovFy#Ss|HvxV`o@MLV+VY@aEOyWS)4z6*snt=W7d~SrPCa5G{QX9)L2S62f0d zn~xvx9vB@q9=@f}Z|mr#2mrexT7Kz;o3hX4K9thBjNP)HZu(Nr^#K4pAAb(n-yQc0DHRHJ6f{m`XCR z975WC`sV{Bwz8oGCKQQw!m(>OYiulkI<)tuSfwYv=`{tY&MhURns?3%u;jiEJB3%{ zuSRXSvJndfh7vp09w*Bgd%Ppe)=N&_W8a%%eqgVV`-?ir!m?D$sLs?vhU)0wkApOSp)nN0XckO*4eWrpgmw6R3Kgd=bPwecZduLzf$h*PMBiV3;CCkAL^kRm z=7DTP{?c#4+{av`t&~~)5T<5YKyO!Ds2GNw$;QV_w7|BIH%S+5M?Z+->t(*DQ`W4! zaH&?-bObj|LcZO`6C)iWfWZvXsTR#JY4;t3-uq;kFNI&Gs0a52vSEJ8xNB;6xqRW? z1pWmnJ?qX0?5v9aXI5OJQY}M-IJTB;5T#@5&DmI{7<`V64*dZmDblitI8NUuRr6gz#zCxx*8)%y`GBV&K zt$&KEM{u{wZW873J54cjwKadRb#9EQ zbM8Br5;uvy8Lu?s;_k_qti>vx=-C9oRjV*ux!j(L>_R&^T!)~lgk#`C`4x-FRUB@- zDhet}g>BGir}gGVmc5Kaz+R!2H!LWEbGEnWyiPXjZa(S#d9!a+5!9sjSJoaaV3__O zPJGD0$eowawm-KQr32m_-zz;;Z#w!k-OQkUC5ex&PAbz z7-#ShUc6Jp+MJ28B%cQveJL)OWd8LA&4L@SJ=XNOgznS>tt(Q!bKp5X-IHUReJ+PS zry|T+;ef#?3WNV=$&bd(a~@BN*s$tDWI`^#H+Xk$MkRqE_`t!!)tuE#H@E$6EGIiG zl(hK8Vmu5g`QmNZzJs)1lwk4~ST?$&g&fsaOeynR>5KwOT5Wn}lp; zHPn0|k$$h)H^3>vfR(9`64uJ{Q5^=?yB2J2x*Q zU81P;Yui*>m0lWN)Y9s=X?ba7ajP<|Wc@qPVp)ph0SzF4?nTv{YH^k^pfAz8>1tDW z&e!s2_tPX1m6h4v-2d*}{6lqqaTTv$o2QZ{wg%*FFt&}bBsfGvPD|jnESi%>IF>^Y z7wBOc8=S{P)_YDGzHZLuk*zACwLA&-Ds{_NukT=s=Cu)@mzhCPl!>RbcbP>%^TAlY z;{+Y&?2liuvUOLN^3%$fCwYhhL%yE&+~OpsyEfKcskyL!o0{!(QRM2Dl;4IoNbN z-6q>oWC?rb^(>x(A^Uj^>`p&v--I*BL#~($))5SOAC6W&67%5(jYuPgGvY_NnlN6c zyopz>NPEPDW%fYeffgwcg{lkf;3zau)7PW3?)BC9;9k}?%roRcMJAhxi8=*FuvG#C zbnTe3(PdA2zI`((z^vzU_a9 zJYSbHS(=m<&SXq=Z|4#oeB(6u+8EH_aWPGF&A;lN5z8I%*F-ng;l7zEkc%F5ce)c^ zw|SorR1s}w0s1PXJTJvb475?~EnuxTrWh-UE=7#oSv*3a(7e4D$J><{o^WEMNap zKa3P>US_C5-@_CxLJ#0Kjn#)6$dL?x+Ep1ZX&UA}{YzZ|aw&4J=AZfZX`4#&nF+{K zi`2A_A0u7KKICj>e#s`<{rO&Ym-wm0FU9o+_=3%rhS4}M5Z)TOOdl>KnMQ1=Y(FcI z*B-3TXXZUSHJ5V(fp4Wd-$`-^wy}FHu5AJ+=-!x$P-7&s5@5(DFI4+#z7pOH!Zca9 zKVVm0iGN_j1>}X%LA*6E0mMZL6JV-)5sh!=EZcXcmY~C423o4We3@Ip$DWCQgdKJU zy>6^*P+B>86tUT9vo-%M)0j&@3==oGR0SfL_MQEwDa(EPZ=oOeOEfA#Z>Yzwxj>dQ z#z%#O+$cpN<^x3ZvB3PEg#7G$&3!g53*|$5WKfhPswDl7_tt#6U(_Pr}Xe z)X7Sar>9}*Y)mh2O2MlM7IfFNJyOteq`%-e$^>xCE9pWsVtQmFIk_EY5;&T zWr1i9rAch&FMcGTw%eq$W%leg=j5bla&%JklB+aEVk>2bs9a`i0Z-D+gO7r}tzw_C z>94UsKGAMxGV2MWAw1RknP?%q({6W)LqP2!h2e-_c2;KQg$vaB$e#kam!;QJH*cGJ*HSX0alw6LjWjUA=-cBMOajte zg01y2M4be@i#<}Uk0C-htCimRB~6K#g|3DMn=NiQyBJf{PM|DQV@`I-diS$GSEGYK zgb;=#=!p>4h_feq88#22St7`eW_r;1ehWH;tuc?NU|7NTADUQmAKr&y6goC8$NM7t zOrN{zY@2FvX4Uki=Yf#Jg_nO0+az&-35ufKroZQm>JbatC zs;a+#H@GR?CKBQ9eZg0f*HfYD5wnC<Kdo->E7BR9Gi989zU z#6#t@MUc}`6gaA0(CqmPO#L+rEg;ES;Uk`0Zm*|xN_io{R9#!rF4y(hN7n2MVkVZ7 zVqnYjmuo#b;2b$yqIYHRc12;~9Jx^R_jmW>nkJw@eCSjLbVGbeMPD0$gwOwx*TIc& z<+t=3dH22WzeM#F>w|>X-;MZ0D*Iu;L7E27UzrMq;#0?Z8V?zArV_ll1e>!jJw3JZ z>SVMUOc)IRw8*R9GgPI)IJTTkliRl^z@3X=dlM(4)G(8k;M#ZB;iYF-?RzbtDNm{- zUq}xUjb3>!2*DCreD;$QN4aA8G0tldI3^EQ9#P2ucls5-9wwFDo8)4D`N(JVS&TMk2SQn!0&u6uO?bzl1qT@?~8GvXgP1C+^*G>E* zYs8z_RZLj6jEWYG!rfO^J`kv^3mOlq^D9KRELu+qN34eVGU 
zy=s|L>$nZ;m?KQa{Q)0>%&w)Ucd*L#h53dl``gFNLmX>?T%tbLqI%w1F|_!-0CEV7 zqK-F+cyYziuSju4v|B?jwukFwA9sZ@yFKM~wYqVL%zrlh*s48Wh~fDg0#+xyI;uwz z702s?YWYbHG#;2H6n)DLoG?xVa=$HdGYF=Uj#Wc*bx2yR`RLi@gsy5j0gX+!C`mI-FB~_xQG)d>jlh+Zo7COotB(J;py4>W0@D@)Q@k7?9lgHQSB@9gk*- z(U8%$INZeQ>3l?Hg2mvdQ0?VHEboTOYoiABh+2Kkaw zB9fZ)H{)oXh@6ucavUUWU)A|Jt|W~tO^d2F(2~sJhKWtY#ycK>*4cD#?;(KxU#>yS zy!`U&zk8Na^zz8s;#zN`_!v9+(q0|i9n-CTs(7k=>P0K6GU(pb+pU@a^{LIw#NDw{260r>0yTe>x2`kG`eXB)JwN)u1v^D=fK!35+9e z#_qZy?}_05f(b+I%PG_+T>k<-q<=Z?OCWVNJiXEo(|L7ya)H&cM1CYm9$ zz{;ixJ+4&pWDrorzRs^hVuhqRYnEm<;I9R+cs#xuuXyVJh(G=o1wyuGETWV?12q{L z-Z>dF9-{)VKZobzM@^_q#pq_+9Wpz^J3_c9AL_&tPCV9nVWPax-2Zd|A=zowdt!^L z=k!yzIJRm}U_*9xK(wh6lZg%hew+7^7c^b)iKi?csVd$WLJ|{0o`;aO;QzSe=M8P> z?fx|W5OsneeGF9iic2}ctsLsKDoRnuM4Po=^FwFbDO<&BkL+vqSud(ts$6tPZ7spp z_Qm%{0!`$q!h?l4=AWMhZ#k8z$M?E5R`LwIlz0_0>HUv)PtCd^hmJ$z)6pBtqtj;F z901&4&GooYR2^g+Oe8d+H?~UUTviltP3WglL?CdDn zxFBL2%73RYb&97ve&W zdz7s8t(PkEtm$#SJFwSu+b~H~SZOpPdf0N$)~>2k!PM!S(}UJps~i~(8f0gKVMC>gg3$>LWUIg=nmRhI-K7dKzzdL`nWduN{Iqz_^3I{O znG{`}b?w0&A*t23q6PNdAC#ZsXO8~&@8oZeZDLF^ENQQ}PRkBcZJ+{hWJX?-18GQ5 zVP0UPR>Mvv$N6Pt5q2pot+>acBCgnUiq&2n|&ufQsVZ& zpM9!vWCP7ep#9?JEsCXMCBjEOSn-&jQ{o8xohTr!6R3&5&s{alW3smuxGWUeV+fytntnP^}cIvy9|(F&WI%qeq3H8m30@nq1PlyD9Km?y;@3 z1~bs0%M=XBGL;dI@s$Dz70)>5W(ykwF(MsKM8)K4$auO=3^+m16|C=J{KWbP5idX7 zk63>be~IO^!-^lGI$ivDKK=4tOC(8Gd)VSicP-)MFA? z`n+##hUp||2St`v{@o0iFf_Sx#-?ayjE8D|PC6pjYs$?8xn>AU zIZ3>+uf>B^alO-Ky!tz!m#p>RCJ@SEtNc3@#0AA|Y~c8?rrzpOw72YndSJkCv~2Hb z>HP3IF_0n>U8l8AW_0oWJ}#o62Z-fFy}l?>h!`f~8=%?-tEQ;+ItlVSx-g&~1trmG>jG1;G@Gcz zz{<+y+o+Gq3_17An;Ai}z0ox{bFROsbM;npiVvvUDwkIV1`;-!Q?C`iOJF&1E<{*w|7&uc13>GUTv`0=SRqQZvWi2=s=%s zFLzfUxtja*dM~8?g7|}GwLLnZV$>kPVfQm#f}Cv+utDg=1kzFm~NSM4l!)e`x^I}&lNxAWP!~nW3ZN`5%%u0?ET&wTgU(DEF7X#T5)Z{%uD3Ox5$p&PzVuK7LZmIkK9@QKimpKadI(LJH$dIqR-}j_@w= zfSkS}>C!YusFt<917MN79kva3##)35lzNL_v zVT_G{&4&PwcXqIPGk(6xv`1+RzQG4x>`?8zd1hVM9Ew+jwu8do8TceMutk}Pb^uXP zf4T{&L-i}rwxE%XjrMHJ-{1b)?$Ha3*@ocvOHo=!AC4z`4m}5>&~}xPoGA{G2dZ9U z$T4mjPMYY2`O?sJ!1vVbR+z=DOSWR`L-;~{<75!%k@qe|#R~ZaS@kMGj?Gb1MbO}x}=;y&0lmsret600+3sjJ3N%&?wncgdfl}?uSi8yJI9I%u34`I z93!@gT`}2R14gk&c6o{Hc9BTw&{rrNphwFQy!|(J-Y9fJT-dKd!L4lbX;#0zt{iG_ z;-^khrN@6{j4}>?Zl9IXtFeI{G^!^5GK+JS%Gy8?H{6P{iL9bIaWbQZd=X14LrkH% ziM_T^W{jKTpDD3#UtW18Exys00XGy`Hx@4bJhm`$B`zZ)13)2JI9WlsRw`ysCf#02 z(pb#&y-##+X3V(yW+xFdf&S8fohQYE-tw7IWd%HeBQqbgPSe;p~& z#kkPF)&{;9Jq(MlSC-w@wrZ8lsnfSPM_QZ{mDaWva6v$cyWfx#xLj>yprU0CZzvb{ zm^{NT)vv&VVW{}q%z(th|{vl(m`I)e9*!B|2?)FN&3*#^TFyD_m$#yqy2kC?JHV?YJo4SoOaY$JJtC5> zup_#&viWa&a0En0BQ$1wwzUTuoUIqLPpV0k8Ue+4BnJ;l!Y-G;6PGli$=SDbUma7RUP8%X3A62BTn5C(R}2gXLxO& z&7bv;4kpaF$Dz70Cgpn6(a0VG$&18Ex$T1irfCIEG+KEUMpvB0ap#31-+4+!s9fWM z(J)iiG5=t|#`IhT{&(1?fj+@Z$aPRucGWD-S{>T`;h1U7RanENv5TtaR?YtC?pT1<8~|q+@fFQm({DoJDDF7Ire4$O5&8(S7UZ+#EFSSZZM=aeX zrU%&J&N&hrXoJBpq!U_IMkOurKaW$%H`;udiFcJ>NAi#);)$8W_}67+ z&{nvmXxu>v!ONtna9N1~`}XDQ*Lbot^$eD0Wy;?)oEQ*;lL9rxr-5ZqNSl-D*ywWI za~Sv z%f&#~PkRIz@l>IASIOo0CKe8VLE+2}AIf%bY_EWQG+NBp(ZKEToy(qillU)p8%io8 zdv88sVt&U0S3JD=gyyLKNzs)r{>A^%VgLTPGZT#nC&!lSWAGZpua z>6jS{NcmB=fcuR9o6E6M4|hnI^E0F9Ye^}Nrk-IY7?_i}t$Q0`r|5Ic z6;qh3P>6-SXf^rz_y%S{Awsc25vsu`f{t!f_{4wI7ceu5h^)7nKYtPXcCYl0t4gVg z^Iusm@t2>Ow~M91GnfVXkMRnhjcQV6khd^i$b7;SG9E>+kjy(M+T{a=WjbK?FIT1j zJG?l2%`s={lLF_lM->k%E;tLGq}lf!h!|43IfWoQojPzK*k*C1xLSg2cLq{Vr(Hfj zs2qP*CA%?DK*}~lLyxQ*ckP={Ek?>x-kJhYO&q#4f5B|;@KF$BU&+^K=6Z$vhO^*3 z(v%i0Ws^p)&cJ4RMS)MAg}!|o$^d;FID69wRT0n1aN&DBty2Q<>tBQ;FwD(Jb`BBd zl!|e_9JU8V%1syLoXE+?yIS-7YkK@T3`m`3ftA}UkXAaw-wXCZDru>@Cqq82i1 zwI6E+cKeqYtm1rF!lxMtGPP+HDTX*Z3iOg$_?mwXbHX^xM>MQ8e2UUl;%9GGZ1|(_ 
zA6c)$x^JQX?+awhTsZ*)Wl?+e8+(#epb{UTU|%tj4b(8SK|veHf~E=%thyD)Q6vF~ zABn|mh&y!H-v(;2LdBhl#W<`Hi$qsCB_o!mM*vs9dz5yXiTW|`(94F1GDn%i^sWTF zu{6c>P2!X3x6G=QSC(Aa7OsfWI5(7-uCJRF`l_ua_mwEvCjvabt|f2J&0d(nHE?yl zcG$ODe)`$!uhoj?6Y>ScPu7G4#*;9-MmlGGkhvyYWZudA81J4?nb94k*$kofVoZkt z0@$NI2YmEMlB@4@bGoTiVl@a7!1(f+mZF)*Z}Yewv8>i7G|6t()#{5Cb4vkCyAU2chxOu?)J25F{_+Qi~NIoCQ|9LganM?YoZ zLna>4u=eOQj`cV=Vu>I_+Ko4jcW!$(7HF5}=MJlU&6?f*-^yQ62q z%bU#l#amn?U_l!%;MABrSS~GvmxbAQ8R)GznWx5$rQ}JAmYpgG!dF7+Q~=ULXqJ$h zOwDLEEs|{OH|;-zj^n1xsNj0f8=IE!xwAJ4?F{<7c}r67!Zd+n?2srJBJy&f(O9@A zo<5C9?&D%bC|wRPDq*uQ@0A%nWBg|B&5sF=2ctZn3-dNZ)-|*Zv^6Li8}ux`cr^gU z*!V9kHaoCpFEWg(W!&k_SMu))YU#OC=iq^0nQu@W*BIh zwOh~3eW3~5_&MS1b|qOBbW~MUcg6uD>u4zPwQFh;U-J_x7)J7=!K%birmhlK^x3jc zovM(l9ldVnOm$f{WZ$5w`Xlj)-5kEI>w~6hC$TbE=RQ=XvpAeXqplcw>pBUMWigf@ zG6pi z+XoLFa~^LyRz1@RQB-I9Wz5{5OB-u~sL2Tfaq`Og-?tn|nU&u%uce@9x{Ele6uJKG zn`lkzp;Z%IlGb@Yq&f$9{b#0jfda^U%mB`1%+>j5vJT-G_O$|*$iJI-hn2qgHe;;j z4a;BQn>p%!9W!_mgeIu+_6K@*a6aJpy9?S2p6P}vF^etcC8=#)CGfwbX@~tCh=?P zDYjo-(54G@e(Y2c@oS4-0l#h}?@>y*-B_CNJ-#sB;t#>T8V;vlhynTtmO^}Dj60PC zG()pJX;`D(&!ws$40~7?Tzm37U$;og&-L4mO;Hdi_FvIiGpr)c%07-CfOt=kfc9zR2sUkO=d{a9!14HRaPv6it(w$=d~jt#OZ(0bY0OIgW;s= zI+DGrT zTl*!+znpAwM4)NJQu@*}$ym-+i^w8H!%XVj)Fzi@bqe2wFj+*DA|Hu}@xWx~oV+OgEm;acZ$0kGwb|p@YSiN-)YXg!?1jETCOW7+ z9^mhtDqn9^vTx}Im3Qf;G(3FbSy@vnJiPv9 z2meT(P@u~5#XWNqytp_bA`=N{7=lE|}^;2oW_ z4^>JJ@m)-?<>@Wrh4gyOXr3puJ(pPzF5$b=xU~Ki_;;bzj=cFkRoT5HD zlB~CftYaHng&f-aVoFnwrj&^B2*W`!6PfdW+uEuWvRu`|KJAJ4AG6p@Qm-U_{wm`C z=OGL)cY*0-?uUKp^>Tqq>P z9p{D`)!F4Nf6=@bQ}df|t1=^c&|9-M>M)s9B-?qti_muT9V6#QN5^|rlV@?69G_w3 z%%inN*|zl?aBMJjTLrkGwt5nHJk03Gu|Y2clgUpmi1{r7Kst95Wp84R>{+;|LrMiR z5eJxxV7*|t4EHalb|#WL6wTAU;#$93JSUiadQf`BO`?XlpDK2we-g0Td?8kc3+Qa1 z_fHP=_osDif4_)`vqzFa(Y;uAXX7=v(V#?Q5zCkI|&G^Dy-&Y!+GA40QYxFER zjeJ7sQ3-3}%1ynG13-V95+w7!W6_u!t`NjacxJa}OwixHREg6Jx)BRNg5^28+aI)? zb430*#BOWBHem$*8fQ!@m^A|=iiosq^knba8*Vum+)?JBeOQFG4lin=widJ}Ts-$7 zR;O|6T_;{wp5trCk^B>$H1wEhYGLMp8#!Q*6xCN}3|Q`)I2o7{mza_~v+k^4_&10{ zVzcP422R6YsVh#XjmyiCFOPL&&w`s<>?Wf;@52{$<#GBO#v3fQu|b67f&tC#L#Zg& zlVYe9_0JlRYUt6vy=B0;3V#UgJc@6R`sBnw}N$SC1c;Am=~0Ex!DUV=N=%H%tf z!>pSlYf2G{zPGm&%~}36-vzgJQ{gz4L7xI9sHmHY;2n3xDNhH8o|CnFO>=EvTc{MB{LRB{+o5W-!F>EShtnfsMA_qQUtn ziX-qW7X*c&1yUr1&~;RPtscciVn(HCE(}*Oa9-ydDwL40v8tbe9e;^L7`~d=DN3Yj z!wr&2pNYhx&$MQ3lmt0eYny33W=C51Woz{PlDpoRb0aLt;0C9AOJ6-ZTGai3XQ|n#n z>IB@6x9Z|uOKxwgLorA)nf~JJm=R$}I|26tDYFf^=UMS$ID!L8cYjq1Vv&x3ui1 zYebb=%9@ftc`Wy1g z+LRIhHL%|o#bk?K(3J^q$b0@gY!2KxAP_-5Z|OM6TQ&YT>$KRNaN50J`t7pXa{0R00D;G4}w!J?)Y9 zn3iQtI?xuHvmOAU??j?9!Cl1&s_HBBtl_{f=FsG7BSNn9-};Z6Tzr0bi}c?+xV|yk z6cuHP-u8fffEeq%uya*hF`$dFgWE_fQHTFv@?*^@p-MPBv_@nq85XLOy2pi6im6aa z(oww&WYKDcLck_(IDgRpo$;W+WDJs(rkmH<0Pcz6=33tQ%Ft&Z}*4itb$zbL*DmZMJLkqQ2+X z+ju#*|L>)Bza}8#{x-~M&s#a0+F{i^Nt;B(XS==#Pa@56$Pe!RZ&|^?EF2sK>4Gv* z_OK*W9xo5KhbN*dl5{Ut;LhR<6*p6aN6=A8S%u?b;?@&Bu+VVObqKwPAX$8uKto^& za30KDCt_s=T6l+Gm&~I2k5m0Pc^2$5X?BXQRl*kYhSmjw-W=Ef%;6dP-|6B@ z=TD!4+mL*_>ww<@XT|*(<^cGE8ZPF`9Ks4wvZ#Nfbcq$o?FI$J0ldMH81-WhavJLS zEPi+qozP0}q;F%tNc2qY-n=_W2`@rM`jk)-_WWkoX1odgRkFsOaGvXeBD}ka(!@M& z&^0eDbR`Iw$0q-M{2-Od$|4d|K2TBAMV&-i?Iu%>8GhJ*AX z$pg1>ds9;E_6XtdGq-D$s?`2Hd+hd-5^tk%q~FrtUuhli8da+LfzbQc-)8T~ao}3S zJ=tV#u%-q6_N%I)hjz7R{U~$5`wsy^nkvm4wa}NOT^MCfQ)Mqf-EjW$OXdmG#;3)6 z$Ap{lIhrPOPG2h11)nZapWQ96Xf3NX)b{1jtF+Sjp0R*#_|7@00|T)aTj)dI@x>h$ zG%hPE-F&TUIf1i;u!=g|98J8!oE@u09VDjE2l4XkY=t>K@k$DKh#3Y5+N%=TM7{iq zYla!>=}@qg!^Ml1Ux^rcSn={d+Vr zcs8vXeDTFeP8GT`s$07D&X%_T2CG5^uAutbF;hQQOH4>d9O2^E&d+?}!bYJN4D|eq2QbH*`1VwFZc? 
zSX4Gv!s$PCeSB{I05AP^pPNOuG^J&#y&4`&!V^|}^4@2OokMyNsdw1Qc zTV8fd(0Pxtm<$3?dV0~HOy{vG03;jjc6xS10!;SrsSck{(yT1g+So&#voSq{!1_UG;8VoO@`Dt8B6Rmbp57zCWnA&;{_UY_9 z-hp*DQcUXU`Hr>KKuIl0q3fI3S2A_KSRPkq|CUWANxFPW={>T~YTY+-{Ps4S(=E13Ldtn zl7fF%{|1Emyw>-$8;WNHy-oBSeNIkk&~uus4!2Rv;i_<{I*l^VN)yZsgPp1B4kVlQ zxt&$xx41x&k^pQWeaT{GkTT}-h5kbA@5w$ms|WbwRtIPjiCLz533fhU~-2?g`g*oP5FUtqF@@MdX8q3Lj1(x_Fj*70LPf zzfZb+3_1iTtXsLYo=v@5#rIgpQf(fu$PArM0sLhv@34E>S1Oad1Btv`6J_`(mn1uK zF;{zC&$>mvUN>B7t?xmVU!pfi&ul+R+S7nF48O})zaT%otY_fo z*V@YU_!I*#i|M(yJm)_LOqD zfZ@+dDW(2dOic8sCwpcy%pol-+kh4$Tb&w73?c{Cv5f7w2S%3V@8n|2geGG?%=!jp zD%>qd0A;h8FYt_aS(snZm&??GdC1`(@Q(v+(MzJmj@hc|F zDguVC6#QE}yFFd=E6hkK2O6(E?H1BpM|0a>$wIrh0~0W`~amHXXjbT z0IbaT#c|3-Ojb{4(;F@c@p{ob@@oFThEJ!pv>@d^CTULC+H{m7Q3(Ysv$#vE^=YJ? z9DFWz$rJ$PTF!bFyuukQuSpcQS<)NQ*XJ(Eg1Ivm0AkY}v?S%HfiAX0r;O z)WTpJP}-&0bNPBSr>W+jf#8`L$J=JWWpe~apmP-4&8S%bq*7Q*QK66sIN!tivgbYuc9NK1Q_NHm3_syZ5D;@{Z(BBGXnk2+b5Z6X(~q7B3IlxmliW6xKwG zftZlWooG0_ZXDmy{VT^gO;=?xM3q*7M1)CNjm|!dXvA$iQrLucU^%QzgFX=pn%=i_ z+Mhk^;&jO*rdeROA1 zOt%^4q_%sf!KfgkM*?|fdgLEtuj_cZhM2xDYF5EiAG5y}b}u9h^=zLQm^Q@Z^6FlX z{gECSLY~cURn@>QO1_o!HS9Tyj#}m(kE|!pp{k~Oj;zOW-IsZwZ|`a7lYC1*1hX{* z9;5V3wfh-bO;PhD`dV*0OBFeV2=0Sq^_YSv`u0plc~$Lni~<$`aW+B>7vH(tPLveA zzvycG(T@pka{B`;!nCua#cgBWLy2R^_t-K>_Br8WA#VQA;5V)`r;n3^FTmGf!rAMP zC?W(kNU9q*rGFOf%x2!hh1u!3)x(hiw17x05j9|1W(Gy`vVwd*`<%~g%h-P6mczwn zp0_pyWwoUvg4vZ2r%?!BNB|Ii3CuW63ZHxdWQU{lP zjAhErU6uW@z0LPb@bcN*f9T}NeT6|$>ahHCJ!ZHj*E?Bh;2oB-+$_;^d?Gskdbj*e zHVlF4YsG+tf(dSA|LuH#n1Udb!`};ccn*IoaOWMV%uF=%AzYay!c@6fr|XI>?6!mE zZw>?#@aZt;Mkc8sans|)EH}T&3?U-C?7!akhj|E`ybdt6t-1U>ezz~gM0_J$sg5q> zhASV)`H=5PG4I0+;a>t`eEd%6+xe`IzqnWFU7obKqW@G4S(^-7b}RCNFK z)F{+Mwr03urZq(gu$WCtL-nD&$OCM&VF`JXew>X{vSBxJCkZ>YS9@-9Y;T-c7slOZ z^+>?*_@mq3LC5!ai?>h+CQ>Q~*+$V&WVmW$4L@xPda9V9aG#nsv2x73Bh7S2=93V| z^-{4nkHP397cI2fuD8^d?K>W(PZ6<&^#C1?+}KFpBfGA`hzJQynx+84bHbRz7JG!i zJEDR7l3FbRZ^F+PdIqG*0gro9N`yQ)qlWhc@`x1GV9+{G_5=C=^qJxb$S4*|jnFy6 z9Eje8aP@bQkO%c;&zq%Rg?TLM*~)Qy0@ZDVIfhMq2Y>l9D?32wJ=9MQXt`!EB%+G+ zX;{{d(}jn~Oj0eF%R1t|J@k)(1}@upj)y9r97!ZTH5bN^h*x^5Neb9fJ!@)b>ipsqde32s9ER$cY&9+#;4!wDyD0hxG-~9ohad67f4(d zycWvdN3DHo935{3j#Hrxx1uU4=$V=eQ)Z1>l7rus=m8=cnn8Ns@A^IP(?Cc?zM!uz!@&|OqkjEg|Z*7^xnxu7QEYg<;R-z><=NUQd6_jaeXrt z%k^Pcb6H9q(_!6!FZyYo4CGa%vi&ZUt58W$ z(&OKUK~6(5z~oML#L3J+^LB$CjWU@t9rKeJ_ZyX>+9=Jj58xIH0Y6X~w~GtQPy>j* zDtc5xlLBu05CxEaj1OR9rT@1<{;jC4E@QY$U0v{J zoe<4u8)nTrJWtu3L?W(%3EiaF_%_ON99K&E=}pi2I`7 zQBi6Cj34Nq<+RyD?6okv5W$I8OAnH4U7%|1_KAQ$t~4^k+oigIS9xR8)oAZHm}L!?FRtWIweHpWpMMSk>q%Uvs6puRhDc;@^p~-AdVTR+ ziFqAMKRdTT_2InMe{X9Pl-R{qh1kdTIoqq+LPbI~a!wlKh$Xsi$B#VETWj*8oA>N_ zbK+Ch-16l8xT8*FJixjBt--07zLTRnPzPkSH?t@vEZ2C-oG4Ym zp4Qeq6WNVQUVKFs(Nr&^C5hQMd|dJOUS<-sZ6bM1I$-8E=@An&zJu>R-3q2HGYBwh zgT=C16afYgIQ#v}eRd1EHG=ghT#GuU2?)iPaq5M(=x*nPKg+XilQ9DzPZ^L{oJfOd zc7G%hnd(2gmm_NWQ4@F}qCo0*n>KZ3kqcI|W@a9)WA;n#7ni4jsaoIqQdi&NpO^&SKb>g-LBe6&hTrHBG`(Wpsym8t%DCdr zs(G%OX-YmX#$AgGO3)?!w1*)3so2+5#K2`(#GU+G==S!z^m+5QOKE&pO3&%}cQB5P z=$H6|_{|U)GST(Kl2!i1b|v`&Hq7=|Q2D4hxu- z+|Y|{UqUgRYEjC{xlU-^{lc-r=idb+wlrm6Aa>y-wKYB!YPF;8A^_H5P}n1s#Er7x za*_eUdGO4%wa*N9KU3Uo?MP3OuK4@;0%+DO0mQkt~cs#KVj^XQqrawJXwQ%Gmau=&9dz_{Q7JiVzQFW;L_B~%M6 z#~aYBkPH2qvIO<3?zoc0?G)I<0F*AYj7T!G(2*q*k-Omym)C&0JcBUbq=(U~+nUhN z^HF37Flx2?@RFst64V{S5%aHt_c;rgMY+&=zT1OK6iVVq;m<2N}^G8&8l%dR8?Js&j63R6Ls&zh`cdZDB_`vZl?&DkBKnOucrZA?|Rg5!<{ zLmbd7&O3e~q`FaLmgnG6fSu!jXEL*PNprV-Yybzj`{sqG%;({O#|6%j3x1goLp?IG5l9Vc!V+yuY;37l&abQy&1tU{ zLnTWXOPa%=K|^TSumSS2P{Sb99A@KF0?D31#bS$D1W*#?WKnucA+DV!wV|I{t2l`qA{EO|pZdLN#$i@?2ZCO$# 
z;*MORayB6~^^UKuS}KLI5J)+Ob}fpI^6qmdIQ+F*Doy9E@D{24H8uI5Z`xHc&yA#j zr*DJ;h9D5%Fqa@}@^4xgg~5>j$lfMSEly=Go)Ed`WQv+-O_{QeXku}WyC^e}mMFwf zXHSn@*h@>E04{fL;nUv0y~B!4yV2ImmoLl)Gpc8vzQIIf&~scWkYh*+B`p~G*^3E( zovA?7r_|@_#1w>RQ;EVUjqjROygxid_WACVuwZjp0hg})N4tj3D)HJ>LPgN91h~xW zoRNZvp}XX_KdstIk3!sKvM-dE4dO3>BZI0Qu!U*IcjjCd4nB>!-o?7VkU%)5EqPz&d?TznsU@FskQbtD3H zqT6Sdx6neoaXB5x(6Z%CO~joMHe+DGFVF7FDK=lVP(JNX3rUoTKVZ5^!P^?Liymd? zx%@A@IC9pBjbcyy(0TDO^*v==B~>K$8YCqAzL)r)#GAc*1$en$0JBde zVg@F`h{CHp7*yT$W?3vDOb|}3sCP9|-6&tjD1_s+Q#PY^YI6A#x`)~wWc}HfmPU*s zF2d>y5lI2U+gX&hl0u=6b!dV}Dusn5uc1NSPsRhV5f6rK6?8r#b^kJ(b6;k7R1R*9hQ41%*r|;d z+RgtSisQ=HED#=d=)Y!JtjX1uA@LBzf2=3*2#iINi6+`|3gvMo#f9Ke#&M}F-rc#{ zP!<)bBH7ZTn7`NK{rxv+IZ{_jL4g>ve7Bd27 zJRskn3qw%#eX0wp$>q%bvo=~SUsK7loy(2(y?p~YO;lBubzKLM)UGsJ!PH-u0?kPc}IF) z(A3hYiS8v61N|dt0)$R}^FLjxl7{{NMgywAwsv$eEtpJD{#Ci1a}y%E3(Soz4EfR1 z5>HrX18{s?C>^JWh9@^9eTK)MnU~j0;>{Iq6od+Wstz+B^5{nBLH#9f-m4>#DBaoc{a1m`uM>7@b7WE@hVo5O}De zepE}DVD2+d2OIq0VI>RKp%*nVe-5Zn!?Njg#DQwM7BEwCPFAjV)XHi@I}t^~tCe3E zZ1CINfwc}x-VMYc>h39OVJ1cdhh}PC0>3(nv~ztEH$l^i+9F+YWmpG4q5u=#gRr)) zbgIjqIy3_~Kb@8|BjH3>itsNBV3Xb*-to@t+sv}NRl2UkKa6uNhHxoQr&c-3Xp=CLc*4#$Z zvP|`X$=r+&#h9&IF&}ke(fR~|bo(L>neL!Nm_H&TF`OdR*bc!CPHM#t!}Kr)UqQ2k zPaj>bW`@D8GDTu7Q+W|3r6@^Y#-|N-Jv)Ig=yX61nH@4i#&_W8F>Xi21LEW39Kc-bp&Ixh(yelI_cjdgMGGO6?iB8q?6;=z^&rNTReOwU< zHsMF>^$DeM8#C`POI&G!mfBkz;zc}f_kS=(JV=a5%v-)=dDk}J-cCp0^)I74Ha1Q0 z(+^^^M%sZr-SfNXt3Z$IgVPzF6%}k`-lgL~Tw``v+MA6&#-GleT(Eil$AJB_Qb#n5 zEm0dlCn6eU;8cyJ(^>4hWp^%l)NwxPwypul=pO(bU`oBqX3kvUW8rzlNFG?ld9^7% zbG6E%8u+eX|1M7uk(JWI`UpYZ8SM5gW*vDXtaDSsR=qfmgM)k1bm1!luuZzx7mcA| z=}&h9T;8VWni$1I7Gu5VS6{jOU{z>hXuLTQTXws2i;J5*i29n$8WAb7m}GpuD){r%Brx`^FE!-+Mmz(ocXzy+39mWEOiw(%4(^$ zvQy=nLEYXk;F`!g&QM%upu43bFhaYxYw0IM1~74_^^uC>54zR0wpX~iVGij;?eUob zAzUJP-Vs#`ge#huos`(q_k3A;2}D_D2gfJU2PE)A*Qt+?9@i^S(NvsYBOO!pSGSG}u6*PnD-cM)?HN#-pXZOypm@=L_D}!)$XP*4t$rxoU!z!LcMJ=|7 zLT*QpZ8hz!<1Eoyrpt*}+?*{k%`^k&3wg@SmtRHP0q+cV*IoX)r9ua40KPe&-TF3o!LsxL9%p5Hc%8kce3J&;~ad#{4F#V>Q@pO*Tb zlzwabc0qz9^9ZuDkEWB7Q`7nfDh3ANM_0Dy<%Iw`JzQ{iz;*A{I@_H;7hvF5g9E13WB}^a%>mp^JWD5@gv_D)W@+6q7>R%TQj4^l~9l zy1=Yb5g%y5x#VL(LRqcvRHl?nSmQEX;o%joT4Ck@V*d=f7j`V_W66G@32Yzo`?Cx~`n0z4arli-85bR`}GY=t|= zz@h!URr&S8{%7R>Zm+ts^U7-P`O;iLM7UCrVUC!Y*+o>*7yVv8m{JO&7YarNtCB4J z-Ga6Bp_nP1?hjvuw^mLPA(6#~Sa+=P2Ba<%49`cLZGHe&H{2oCM-<~P)|DtEV zWjGoq(Sv^{-OarGm*U-cRFb+*t^^Kjr-SdMudmm8@pU$s2t-G&OZ7rf*)HaMp4puO zCs+a*Tf{QNNqah%z>8}~!OO*DG-BE^5Clu0ZGb3-^01tWW!asWO`lZhSJhC``xKn7 z7GN_U0WsU4XnwF7JKPp2NiT%0xet2#pprk{yA#R(NGHNS5l$+c?YYJN6dOlFoP|uG zSj034)uETAe)*(aShb|%@x}tZE4@~&CEO_etnzq zFYDGGi%maIvZtiDKHYd%Gu@@y0^KQ-itM|HwGB{}3~KF|d`^BWt_}2JiFaRdA>li_ z#VjvL3cNC9{`{~kc*$|PA$NJvy|21OzZq1+A(G@O#swxCuL5v(d?9++@MJb z)Q0RLQ12r1i*v8DW3E1kKO^vt%?#AWQmbE?;)TP~XTS8BIAJ+%C4Ik0%p}k2rZv&g zYfO)IB!}&FpIb>-j)~Lv`lfW4nPyt>Y8zMPkRD@|(Io1`a4+*`0j-R6Oxgi)x4pW# zrZ70(sq!t#vI*y?Q$<`_t@?s>r+>fw6T!7kE=JPf1b*`KR^-!9BUchVXE`|>^YpAR zBOEM>K@Z?=CvwFKq*MQ{brZ!-{Zd4II)XtDehMsj+x++t%iS-(!uTv#VY@{l74=Fe zpmtK1s99I=eIrKP^J!+@G4l^YL37nX!?q1KV#RAvO-uKUX@WvB#M`2V-=)957vHab zap}c=_KV>c{T$8vkGdWAS6|G%kB7#iM#k0w-E zE&v0-Mjp#20x9B}dySpbk|;osVB5BB+qUiQ+qP}nwtd^SZQHhOWAE<7#LUCa#{NKM zW@Wxr);ZgD+H(pPOXK0j)yDxgyF4@auU9oJL^ zF`!+)HzK*?&Q{~wTCBi@XrJCHV3rs~;iT8`xD)~rS>4g`hn7OG7gw<%rtcvg(6~I@ zC20!j2N2SD)n+51ZB&Un$Z=}I4I;M=d8^VU0NZe}Z4IWOh2G`t;QQpcJUU|0_N@Is ze`dmzMO2^u@ZUYXko6u(DirGz; z%c$DHZO7eS0!E|!ZLB38a}$sjGYb}-_yMb@A})LPigzl+;TP3eObcfd-R^FfaN>Ep zgke_wmGn!-MC|9GLQkFGdHl1o9)o2-P52*z~XRLqSU<2$_DJMr8M z*Z*YqGOuh6_tK5nFV?)W^=B9TlkG|+`6Cn0Z{oRHq6FJ18vC)T^HNy++tz9)Ll15{ 
z{Yoq0^W5RRsjy`2k1J;zo4RWe7?6DIV>s1my?m1Bms&!H0X*TmfBU)Z-_^uXl&lOpBgGlgxTVg#IUV=zuyeQtmVMe-!}2=4CHh><%yV%VJ_ zPSwA^+?jNMH|j#AqK>xpHn$|^=dH{umZC0ct^T2JPUTjKe39uZ z2dXhnTn@69S}W6`m>PEGtP4*|ONBiblzD-jjQht@XfbYe&pG1j##wzF;Lo5>dXVZQY?cx688-Kk)D2g*t!3Tm~al-c^>%_#xR}o9duodpH_5(Eo&%r8TKAKJ((MOO-%Xyzr zz^u1-Bi{9?$A-&(+O>h`s^Y;r?iAE^N6C{lo@+^a1k4PXWOIMv5Qvmi0u3qzPI8Yf z{n4#pKLO*)9F1W@ba97vyeAb*(~}Ir?nx-b&Cquyu%Z}54PH1a`%#lQt$wOP^`mC+ z(6=UGFQs1NO+?a18f-aHvynlPhdxt#h2Ds$!BWD|R*1zHe_q}*GZE*MTVt8BIG;G+&SOj1#K^Ju_RyJ%uFs)A|(yG zMzD2DR00jW)~RTSWz&>|Y`#t#<6oAIA)e~E!i#N;1i;AW#Gl9qd%(=S}QB?Wcvm##okTWWLw)^~>S zCxZfJ?9RMlLIzPmc_0)9G=5qzMZP9u)hK6!j|e|GfzeL#O_Dk$U?l=3Wqft7acRi! zYnN?#nZuW2JpUaH0Qr!?&?lq9@W6VqeWFA9!wGW3KW)coIY~0OKx>@esi&3LF46X6 zC|XBFI-ICT+pL3ZjZWTL{zrla^+XoQj!LhMJmgztw)4dDX!%q0g z>W7D6oO$&HXoTd35AB2VQ!#W10tum!8kvp0ukarWXj#M~d6O_^6R+2+-sNqj-pu5j z6yi9VG;9kB_`$9YynC(@_0(_tC=-gzijHwRj2H1eR2ZR4lBd>dcicv+H7DA}v{sU` z@btk~_w4QPRoeT6wl32P2u0Me?8z0D@H`blmA+`R*^VTOm-iX*m+K#!Bw(^=zQ6%6 z-^Vkz#g zl-b}PyU_Qaac8Vw&|`Il1sE+8b3vy->=`}1i%?#WS1YpBm z_mf0TB>9MAbu8rJ}h8SGuiu+k9b>1H=&69KvRr?-NbePZ_x{tfCTG=EWi!X{CJF z7i`}(jp`{F*evNxlC*zfl?6MhIXHLgwpoZ_xjc`T&F+Sr%W8dVnI$7r16a3-^h`!U zaD1|D;BSew41zI!wKvW`G*QG`hNxhY%Uv5aFPA7PE`*c|@`uOk4E6i|b=!C2L;y$Vux#ve zq2JFW|B>SEASVZ(mDg90oo=q>Vdz34nA3$1xmkbT$1rIgOJsZq^Ls+!d)x5r+z5NA zZ~Z$i6PASuR_T+Ai{JKu8xL8_7eK=7iwsQGWk1|Kfb07JodF-dOrBrbp4qEf&^JN2 zq8n{JIeI-+I=psN7NmdCC`{5l(zF^L)g^PMs`*xOnGo_wUJVtb8mQDgQNfj2o9fAmJocg zKzrByCJRi=ddxYC6B_}4?Pn9}G=?CdCKzykDQ4P^Bg0$6CR$nj0DJ;gH?(4IF9{3W z@c6W40v@<4@waEUrY#x%*yjs6sAp)f|FvFB7~t{#RQi~|#E%9|6Dv&JK7QpfA+V~6 zVaB!9!_S?E|aA~BDXXeB{-MFzsCq}olgYk7cwz_ zB1@!N#FEw6oj!E-%6FGXXFtgt>J|3sUh}aWADNW8h(vP>DeqM{3R=!^gDYBbBS1bS z@J#IuB++Y9)!kcqfP3-{pRsHRuc%j6{_MMO>YEr&D5X`Xrg)=!BZU23S@6Q9N|j1+ zI;{{~8p~c)&S_-ZZ!BXr)_8jv=_>d~F~++Vj1!|W)>jV7S?S;XE%Q2TF+U~@rL+R+4!+RAt5spT}qMtBhkNysVIPXMzk3W&9ez& zrH9~O0PYFI0ix=hYHfeD^$-t*xZJ8DqQdnc5mg@3x2c4Id++lHE%y_-!n(Fxi#*uq zuZ6}f8+SHtBXU095N#n{AAtosRBD?1Zc2+k>SW6dg_5iS%vBhcOyvNyLetd!lMM1%1 z7h?0B3QRz;O0%q8VH>)XM9=NF_-Qqo$(4vN zF);&}L7HAYM%M?cRnYew!6ZAo4OuXmq;ma`q z)imDd@X{8!3`RB_FekP&cYA}7o2PaJXBPEPF@1^`<~T4KAZcat(;6vjIbaqByg6j#MowaJV#so=J zCR;*#WzLGLaU>Xu4qXHEQKBYZVKXGLsa-odMP1NAwVNSGaYuNNBujmBJ_&&W#KKpsv^oZ)j%CuoV^Z@rjY+kD9A>5-Hrd~_~P5Kr7XzS+27!qo+Rvd zHd&!A5sP==z5jRvTCHF*e!`xPOCVSGb@#b+CjmlX~c3<{d zU9x%Bt#&9(GiDg=?8tH(auN1o2=tX|lB57#u(;v;^;w-xzB8iNja(s~BwMdA1E2$w zGPNg|nff2RAmHZ*%^(Fi_c#kmA=R5f4R@HnQ{1q8{sC{IgbyLwP{R;OY@;zwI$Px$b)kVG zZ?Wq5%eV}4iKnyO)t8KxJXL6hL7^<)izv z3U|UQj#Zf#Gw#H2mf|%k{yttYSBu+*IJL-MS!W!ef>~0do%2UhyPv@f7Ep+#7}oeL zUCiIv`jDUhsWl+NCD?yYN%GXjV$~yW%r6AKF}8}Mrn#|Z%J%OXZmIc-omM4Py*I1k zTBq~4uD)l-Ql9ub9!g^RU4)>FfR+r7`a=@lYwj&?QFxwdFdBmWTid3&A}Pg}Dx4z= z&bP)-!rk9a1eca>VQRq<0;L2|J$^Wfpmr}r1JA^xv1#Ojby^OvjY@Km85?xdkL;bq z41} zDF;+2=94BTOd;pu*!~DYW7H8;ZVNzxgC@$6XT#NJfCbI8oZV?07<~7vV z>!vmZW|Lav4kure?S{e5$^C1eKfSNq!vuP9dxgy}F>A*JF@4NcSJ}77xfzQLuA|$a znR$?@KnI0?T)T8_syJ0A1`9n8K1mcIJL+w5VS9$dnc3t4XQGC=yh9ga)kUUN0PfrZ z6eBM1J(P*q@bL-s&x(8NQ$uS=VJb~Ww0#VDSh6FSh~2}D=_YW>yB%p z&Fq6~e;8w&mT@?Ud`_s$?0Cmq5~PmkJP{j^;l*7H7lP^_RKQ~vk`I(dSK`jvQX8du z4zI%02SI(2f`*BD8p{`dk*f9RZje&g<%kuXAiad!e?K_+dy9xfk;P;gcN-Ki`M;JpL@%-!PocN5cY2}WTt?^OTj zPMvj)sD*9mwl%ZHqO0{wlX!2t|D(e3eD`~vR3~L8G1o3bgff<6oJ7L0S{mIzOEuaV z2x{b+MJ1X)OGT9QI29V~ylC-mmN6F;9wL%ph7vM>227Dfq}Bl|NMQuSAH*jKPcUK+ zn0iLHi~mk!0~Hvmm~lRlktNXE0Kv_3P9)ojZkxcI&vo~M!?>n;dcnNlQStsI9@y3G zfXX~+_aj5*RuheT!u)F6-VsJX*0W^bt_yS%GiW<4a;tICm>&B+Ymq@wM0d34&fkh< zRRMRA?P{LpzAb>uVPHWK=$cx&uub9+^gA~Mpal11C1DT+u~lDrITs=L9_~YP}xOF{BPv>WMJ6NPkR?ecCM#qnXoZF`dn5c=DU&GY`Z{5%fa 
z{c?jJwtf3Ji?@Na5bWQsF$0yNmqK0ZP@(#U@VB=q8-~aJ5+qfw4CIbkJXFel%?Aa$ zIG=wikZCDI6A7Qz!{SY&+xuM{Tc*ijxbznWILU!3r`1{f{UHejitvv|&!N&jQh{Jh>)*A2d;v01UDpuw;KloD^AX_;l zksTRl49rXtX{n*y!lMwy%%VZ64@~%JO!l#wUri#33tvrCDg1gqlt3btloiQ*oh0m| zW22|&6b@y6AQAeO%7DKv84Fv0qVm7fE`d(G6gL>zK)TDyD*jMS3WIIR(9Zhjj*%BZ zfux+uH93p4WeLi^824Lq2l**$mpf*+%VzDI4OV2a|^3(rqv;gL|vdC_U#WEw4x73zD+T69@yOOE~Uf^ z`7>Bvt{^Zv5i4J-`Grz{_jj4o1%c+xFCwmM5F2Hr^hi5;ouxNSUC~Lq<&!|TYEXR7 zm&V5c6s+oq976*V|K?f^C^`*?Q`ZWJc9ZfeLMc&8JmIPK=};AkT{C&!B0s;>NFQve z;rUzFqSEH0InEvt3TrVf$MZ!ocR^M7p1+(0VJjk4%WbcaxGiD4+JWYMl7wxElzRdo zEN5^9jKhWC6nd%L7KvZMks5J|i3P$5vFgwJ?*LO1u@PYcS_+kfI)q@>DeaUhNNt>* z;lN)YSSo@+)MkV*fq7<+71VscHyR2|Le(@@Io@+G$eCy=;;tlyDrcYHV3A6U z%gdK2^&-kFCkU`a6oz77U`U_MZ(WEvC>GJFW{T?EB0tOi7r4tYTs1G53G6%}zSl6y z1^FB*;(|z+TB3xHRQ<9J3MR(b^^`oMSYZjX0Pk=0)z~z~(L|9L>t=g&VgWeuX=3|U zd4*_dFqr$r-F83%v{KdGB8|#&a6@Qem4=Kw>7UtdckOD?iV8|eV&aN;(83K{(gXYi zNb@hNSM#_kk7qF7!c;)_jE*Y2RP*2x+V14F?{LS-c4eELR;|U zr~?mOUM07@Cbh@Kx`lX3V7%T6Rj|sJ0%8kJ;)*c64NelDUPH>)avL+#H)=*aSZQp< z=ERfsgUAfuteKhR(_yxXKL^cJDW{o`$)>&urwo5Iud#dkH0iW>Y19`!4*NY6bm9q_ zG>aWI{p)Bhirh%>(iQ`#KFAL#StIyrx`cl+EsdIo*B%8;1KM|D!?sOdBry?cH9h?4 zgd4AcYULi%F>}abc52^Uw+BU5An=u4Vd*n2>JZ4b#v?6gmf9=@Ni$}&kH>)u5eYwc z6Ddpm>EuYSawcBWleZalNnFlb)AsO*z$+5+Bn~vF-It)5B*{8!CC-6R?vQU%KJ^H~ zM(FV#=`|uwRa;j+SOvh<={Vhlp4n9{{z>G2Tr3xT*=-KJtSknJ?`ku$V3i*+_D?>H zN-9!y;62ggD+lyBSM&$0edZI;6ht99tMqmo^ zl@quvlBtGv4Rbre!itrvU`0Axk4L|u6%4t*m|YjO9PetEJ*ZO>qlc2y6(LR~CePu}NM=9t=W&O-UWZVG}Aray|e$334(y-3Czuh*y5^VqeS50M3;IrZJewMjV=8u>2sK_bPFTgL_cW*)gLT!+@?x95SD^Zk~fhSK`%MZG33xxB!(-O@s&3)x_ z=TKyDk8zZRn9(!AU+4AV^}D{*CO*s@MtzGp9VoZs>^51^=$+6+%{d?|w7`-5cEzu{PY@h;pnO0oJau)zP(yrsa*@!?8lB_{jvH;&=$xQKdHlX5 z`2=9_*khmVhMLN*iBhA-jOe-#PC)M@qeMzGxEIBl(nzE#$icaM&Z4byRddK`nf97H z9#a?1GJdN^id%5o%df-oS0#G@hnUQQ62^s>SUX{2qOpp`3DXP*YNS#eZEqF>e#8x_b@JQe z2k>Vm_J?5i^6y)nqEq@pe;vGc<~Z33RJ4l+AWEj}3j^D9e0C&9g8yCDLNv(f99)Bk z^w0o=wS(+UBB~{^Y!eXivK1(AO$wnE3oZN3>Iie#!z5XW3kbrDnU)sg_*qUsohnR5rY zs6)aAHXqclf{Ix}RwTc=#b&f{|D2WnE z5Ghfxa0W{nKCw>#Cm0B3qAOY$;hKdXppaXjp`WO*-KfDIvCv(y;h(wm-GK)Q6)^bq z`PIuHfr{xrxC0@Ikomg+RxqpIu!chyM~@IWgj6n5qfoJgRV`=7kU51`IelZ_v9-m^ z)7|wu^^pSOj}V=YAs3G_U57&-o>*0uX`7yU-Ij|Vte9P-p_{C_-IOEp%^>&KVEgI7 zrB~x8H}eA`0P$0J?SLWo#g85$RmhxP_09f22E%Z&;r1m~v_6LnCZVn1PYi~fr2><8aR1^#4!}An3?~n zyi~=_iu7H;{iYot3j{-&XiJDLI}bKlk3N49L?4h$KNM75lw5xpWS^L9zZgw5 zU%$nO)EAS2C|PWB(CM{~#_7@fLm*GI#YW{^sAfS|NnK+5=x5w=W+P|QGIcY%YPpR z9scOWuBuJn?A7lsdK@3F<^CS-pmA`V&-8{ac$?-_tm+rCSdm;vf0+N%=P&Y6T~O&y$Vx8g z>sB1(45au4mQgxGAoUi6cc2{hlm(I-W(F-f;*C5y;uD`l<7-#K_;kT;_6xYhy7I6n z6S}ZB(}S^{n%_?>PrHuf+Bhqj*>(^sfP|dv zk2-U^fXt3+jxYY840@d-9;gtx-IJo@G>;_{sl?0q&^wPhwz{?Zp!j;oX3cu*eStO= zYYJNAB|=XWEVmXoCa(IYOO+63p>)|igXUL&`fQi$B}u>daW~hW=WT?8?Fp$z1`fOyUk~_>2eEF6`1d=RZWF~Z6soj>RU$0kRA$g!>JgKIR=eK(ByZ%D;-cQU14I?izgpq zd&1A@727;_yMI%LT-S9GTtjj}s){{l^;Ty?t&+(3xmqg|Pb64hmD*;I{ZSW)Z~YP z9Oj?YX`7XwUh9@mA~8?o)ClHc%>_1Aj^an`Fd!qdl^=_pg?)bjH)VGr%62P(KmY1p z(Pq+W5VGKk-f}R0kFt9wFd`SgA=AXO4>m2VQ|yD#K0xPH7h|URi92Wm-kyhXVft>- zsT2oEoP7V*Ub+kWJY{P$YtD8-k8e?yKz5&eSyyz48s?9RL#m@6T-Zi^@O?q?HqSA5 z@e%uYgAo24aU!z;aw&Y$5@8BNJ+ck_=@p)^d?00PqZs=(Td%BiKrpNC=uENN(M7g9 zQ77yeiUi76A>BXWytw&5#r@#EkxjJ??Yr6~mWK9MhGNq=&!PQeHSd&dY_0{0F;u%; z>{K=_=V3+H?Zwg!h-R@`B4xsw|K*a6L4=LLOLG0<8L5mCBAgqN;O%z`eZyiYJ5KJ`Ldx!mN0!W{HEMhlV74?33t zWI{h$H#m7@b?^z>`w|(!2Iz2}qpa!}Rs`@kaSdSc^k{>qpbYF<1|ynK?2b(gsY&Uv zHSJa9`_IjZ1&Pt9FmriJewRe({NU0N29Xl&PP*XN?#d9Yr_UiP^OOW#-OT%}h;Rr( zz|{D<(ubbY(P@z4)UkJ~t4e3;UHK9EweW(TUXf_j)GUASHeC0F#x0~-Or-&`+QTPk zs|_`Z8B$A_*%!q31_69W)nMg61nqU>CUnpBF^Oci34iq8w{`hd>Ale4dMf 
zaW|;tKvx-do6G)YD1Ve{1uOC8CFXs?nwDF`%-<}7FMxVrMqry<6nU=5eKUh|XRuldjP_N|%8?}f~3L5|k2rHgj>tupmUhiZ`HjAJM^i$Cj9pKzE$i*%q(V00LTwfXon8a z!WPZUG%?_!^94!FpE;D%TiH1v3_BC8UxInZ{xN5p>Ic(dKg;sS z_jcADSKTl5HOY^Lbfl}HH(%b~%1N8I&VntwVe+L2i7A;irqf)f?^R!5`D58n^*PKs z?Fi|@NXQnmHZ_0R>iuxANWWF;d%F=}#*J*-Lc9c*nUnZBm`P6qyq@O6mcUTO=%hRs z1LSfZo2yO&NH?SeLyd-Ce1hvXRb&g)@SRpv4APzB5H;iPW0m#M=Wy;N1QqFFm&{bIp&1nnE{f z&CKl!m{TV&;$_$EeE%{zJm=An-e>pcC}zh|q*=V-1D*nY5F z2ts8v{_P2-YK9fxu12MzEt5bDtyeLMH6Ea+A#cyd%i;TQg*ZZb3Df{Z0{bM&jwAgUzfrl=Vt*Mq<- z+zwIQ@GDIu?1}Tx9dTzE17*?1$c}>%5Y0eenO1_8hOT^ zsCIy|OKcsHq8<~tBT(FvKxNn3#lTJ_FASZo`y=cv)%$?y{E2X8HdF-Yz1dk~qLhYw zsQ@*I#o`=l(6a{lPAww5ff+rM?OiU$)b%Xa zm&gE9fv50&cQ0%wg3S}D-P;KMaq$<0q?iS%w)&H|RXj9o&D{ag~ z9FYjfyONF~Z;f%Dpn7y4sDc3+XS<+WaZ<8M#8bjN{!Ku@w=#snH-Nd7=!?oW&*lt#mU% zT6KQ`llEFIYVF%hZ#3b3 zc%+g?9yW+0sD z07CO|ecF=3m!P2=;7l-a)GPb~NfLLrg%{bidm$hfYy8AF0vDs(f($RN)3h&Tu7rXK zSa`Mc-n(IgF9!-mG1Xsd1?w#OyoP*Tq-wdU3cxs2BabgJY0<8 zB?jX|lRxH?T8NkbA#_d{0y<23*dAE+*ym_9caBN-tzu`=ivb_Jq1J#QMZC%+%OP9* z8KLAyuqCabaAJSo&xP%XZFS>Mz>*-{-J*AX;?uxa_>_Vg3O6Z_onQ7gAYf6=&NNzr zCxIh7EH=-R@<{Z)HVQe%>a~OZ1Ut+%gA-#fY&sC7&Hx;ATn9!f#mw*&XxH$dXOoJg z6mb*no{QGu|YH%Pm`sU3Fl=+-cFKyONBn9q>4g=Djb{fY{ z2oTO?tsGJv(RR2bSf;8#C0o;kU=2>@nxzF2FmcY%P|n)r&=C`Z7|dlbif2zX0Lqy# z5NYfSAWf5sy9%Vo@3QmC0DI9RDzrQp_z=>+{j+jIU(QPGX~hIjmZqINaM{mp^A7dDvo17z-^cEfm3D=40xHy53Ru}v4?Gm_wTCS`JK5n z_EAk{+O5p=05ul{;_zzmn(PV8b%}_A#7rS4 zW_g{ejA!1l^UhV5RpmSTSmAj_Hks)t!fic1)hTf91V^=3u$r|VU`~DhVEh+g6MP^_ zV7Hx40`&fB*vnt}k892wNLKFxlB1$9;2b9W42YSNnb88R3R$jG*9=g<#q!~Ue3dSA zj*yb@F9mC zC*gx4DCf2pz7^U99~>1H@r$ zZ*pKvOn1=%{;WrkGjZxED{$6FecJ~h3%l8jn<=^Ro+xd^V>+PY_S+1KND!}TdV{_H z_4l4N0*H2$6MGec6qy`< za-1zL*ZLhV`=nK_I9RIn1p|DvJ{L(6)0=u#a-oAZnL`^p z0A=67%)TnxfDTb|#JHMEcBF)+5_?U;sNWDXbMqH-9Jsh=$GMe477H^gm?$GLS-d6P zXRKvW|AKAN`_E8EgULOT>#CYM_;u~Akr4DKD6(3WPp|6ak)ke$Dd${|Y*TWgTe^h- z^^==2tvAnk6dC02pfNPP!LF&6yF~^ArJ1TS+=l3RUvflwyl&%#CkTa~gcyC1AzRo|^2-ValSD)&&-=s+$=*S(D?>)%nO%0p7a6YHTKoLX){y>6A+Ht< z6t6w*A^`%v!Bw=3Sb7q*89tl1szcx>M#XS~$;n{(fKveubbI$|$QodfvgdQfXn?YR;GTW=TK*0093BVBfFK literal 0 HcmV?d00001 diff --git a/lib/velocity/velocity.min.js b/lib/velocity/velocity.min.js new file mode 100644 index 0000000000..58244c80e3 --- /dev/null +++ b/lib/velocity/velocity.min.js @@ -0,0 +1,4 @@ +/*! VelocityJS.org (1.2.2). (C) 2014 Julian Shapiro. MIT @license: en.wikipedia.org/wiki/MIT_License */ +/*! VelocityJS.org jQuery Shim (1.0.1). (C) 2014 The jQuery Foundation. MIT @license: en.wikipedia.org/wiki/MIT_License. 
*/ +!function(e){function t(e){var t=e.length,r=$.type(e);return"function"===r||$.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===r||0===t||"number"==typeof t&&t>0&&t-1 in e}if(!e.jQuery){var $=function(e,t){return new $.fn.init(e,t)};$.isWindow=function(e){return null!=e&&e==e.window},$.type=function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?a[o.call(e)]||"object":typeof e},$.isArray=Array.isArray||function(e){return"array"===$.type(e)},$.isPlainObject=function(e){var t;if(!e||"object"!==$.type(e)||e.nodeType||$.isWindow(e))return!1;try{if(e.constructor&&!n.call(e,"constructor")&&!n.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(r){return!1}for(t in e);return void 0===t||n.call(e,t)},$.each=function(e,r,a){var n,o=0,i=e.length,s=t(e);if(a){if(s)for(;i>o&&(n=r.apply(e[o],a),n!==!1);o++);else for(o in e)if(n=r.apply(e[o],a),n===!1)break}else if(s)for(;i>o&&(n=r.call(e[o],o,e[o]),n!==!1);o++);else for(o in e)if(n=r.call(e[o],o,e[o]),n===!1)break;return e},$.data=function(e,t,a){if(void 0===a){var n=e[$.expando],o=n&&r[n];if(void 0===t)return o;if(o&&t in o)return o[t]}else if(void 0!==t){var n=e[$.expando]||(e[$.expando]=++$.uuid);return r[n]=r[n]||{},r[n][t]=a,a}},$.removeData=function(e,t){var a=e[$.expando],n=a&&r[a];n&&$.each(t,function(e,t){delete n[t]})},$.extend=function(){var e,t,r,a,n,o,i=arguments[0]||{},s=1,l=arguments.length,u=!1;for("boolean"==typeof i&&(u=i,i=arguments[s]||{},s++),"object"!=typeof i&&"function"!==$.type(i)&&(i={}),s===l&&(i=this,s--);l>s;s++)if(null!=(n=arguments[s]))for(a in n)e=i[a],r=n[a],i!==r&&(u&&r&&($.isPlainObject(r)||(t=$.isArray(r)))?(t?(t=!1,o=e&&$.isArray(e)?e:[]):o=e&&$.isPlainObject(e)?e:{},i[a]=$.extend(u,o,r)):void 0!==r&&(i[a]=r));return i},$.queue=function(e,r,a){function n(e,r){var a=r||[];return null!=e&&(t(Object(e))?!function(e,t){for(var r=+t.length,a=0,n=e.length;r>a;)e[n++]=t[a++];if(r!==r)for(;void 0!==t[a];)e[n++]=t[a++];return e.length=n,e}(a,"string"==typeof e?[e]:e):[].push.call(a,e)),a}if(e){r=(r||"fx")+"queue";var o=$.data(e,r);return a?(!o||$.isArray(a)?o=$.data(e,r,n(a)):o.push(a),o):o||[]}},$.dequeue=function(e,t){$.each(e.nodeType?[e]:e,function(e,r){t=t||"fx";var a=$.queue(r,t),n=a.shift();"inprogress"===n&&(n=a.shift()),n&&("fx"===t&&a.unshift("inprogress"),n.call(r,function(){$.dequeue(r,t)}))})},$.fn=$.prototype={init:function(e){if(e.nodeType)return this[0]=e,this;throw new Error("Not a DOM node.")},offset:function(){var t=this[0].getBoundingClientRect?this[0].getBoundingClientRect():{top:0,left:0};return{top:t.top+(e.pageYOffset||document.scrollTop||0)-(document.clientTop||0),left:t.left+(e.pageXOffset||document.scrollLeft||0)-(document.clientLeft||0)}},position:function(){function e(){for(var e=this.offsetParent||document;e&&"html"===!e.nodeType.toLowerCase&&"static"===e.style.position;)e=e.offsetParent;return e||document}var t=this[0],e=e.apply(t),r=this.offset(),a=/^(?:body|html)$/i.test(e.nodeName)?{top:0,left:0}:$(e).offset();return r.top-=parseFloat(t.style.marginTop)||0,r.left-=parseFloat(t.style.marginLeft)||0,e.style&&(a.top+=parseFloat(e.style.borderTopWidth)||0,a.left+=parseFloat(e.style.borderLeftWidth)||0),{top:r.top-a.top,left:r.left-a.left}}};var r={};$.expando="velocity"+(new Date).getTime(),$.uuid=0;for(var a={},n=a.hasOwnProperty,o=a.toString,i="Boolean Number String Function Array Date RegExp Object Error".split(" "),s=0;sn;++n){var o=u(r,e,a);if(0===o)return r;var i=l(r,e,a)-t;r-=i/o}return r}function p(){for(var t=0;b>t;++t)w[t]=l(t*x,e,a)}function f(t,r,n){var 
o,i,s=0;do i=r+(n-r)/2,o=l(i,e,a)-t,o>0?n=i:r=i;while(Math.abs(o)>h&&++s=y?c(t,s):0==l?s:f(t,r,r+x)}function g(){V=!0,(e!=r||a!=n)&&p()}var m=4,y=.001,h=1e-7,v=10,b=11,x=1/(b-1),S="Float32Array"in t;if(4!==arguments.length)return!1;for(var P=0;4>P;++P)if("number"!=typeof arguments[P]||isNaN(arguments[P])||!isFinite(arguments[P]))return!1;e=Math.min(e,1),a=Math.min(a,1),e=Math.max(e,0),a=Math.max(a,0);var w=S?new Float32Array(b):new Array(b),V=!1,C=function(t){return V||g(),e===r&&a===n?t:0===t?0:1===t?1:l(d(t),r,n)};C.getControlPoints=function(){return[{x:e,y:r},{x:a,y:n}]};var T="generateBezier("+[e,r,a,n]+")";return C.toString=function(){return T},C}function u(e,t){var r=e;return g.isString(e)?v.Easings[e]||(r=!1):r=g.isArray(e)&&1===e.length?s.apply(null,e):g.isArray(e)&&2===e.length?b.apply(null,e.concat([t])):g.isArray(e)&&4===e.length?l.apply(null,e):!1,r===!1&&(r=v.Easings[v.defaults.easing]?v.defaults.easing:h),r}function c(e){if(e){var t=(new Date).getTime(),r=v.State.calls.length;r>1e4&&(v.State.calls=n(v.State.calls));for(var o=0;r>o;o++)if(v.State.calls[o]){var s=v.State.calls[o],l=s[0],u=s[2],f=s[3],d=!!f,m=null;f||(f=v.State.calls[o][3]=t-16);for(var y=Math.min((t-f)/u.duration,1),h=0,b=l.length;b>h;h++){var S=l[h],w=S.element;if(i(w)){var V=!1;if(u.display!==a&&null!==u.display&&"none"!==u.display){if("flex"===u.display){var C=["-webkit-box","-moz-box","-ms-flexbox","-webkit-flex"];$.each(C,function(e,t){x.setPropertyValue(w,"display",t)})}x.setPropertyValue(w,"display",u.display)}u.visibility!==a&&"hidden"!==u.visibility&&x.setPropertyValue(w,"visibility",u.visibility);for(var T in S)if("element"!==T){var k=S[T],A,F=g.isString(k.easing)?v.Easings[k.easing]:k.easing;if(1===y)A=k.endValue;else{var E=k.endValue-k.startValue;if(A=k.startValue+E*F(y,u,E),!d&&A===k.currentValue)continue}if(k.currentValue=A,"tween"===T)m=A;else{if(x.Hooks.registered[T]){var j=x.Hooks.getRoot(T),H=i(w).rootPropertyValueCache[j];H&&(k.rootPropertyValue=H)}var N=x.setPropertyValue(w,T,k.currentValue+(0===parseFloat(A)?"":k.unitType),k.rootPropertyValue,k.scrollData);x.Hooks.registered[T]&&(i(w).rootPropertyValueCache[j]=x.Normalizations.registered[j]?x.Normalizations.registered[j]("extract",null,N[1]):N[1]),"transform"===N[0]&&(V=!0)}}u.mobileHA&&i(w).transformCache.translate3d===a&&(i(w).transformCache.translate3d="(0px, 0px, 0px)",V=!0),V&&x.flushTransformCache(w)}}u.display!==a&&"none"!==u.display&&(v.State.calls[o][2].display=!1),u.visibility!==a&&"hidden"!==u.visibility&&(v.State.calls[o][2].visibility=!1),u.progress&&u.progress.call(s[1],s[1],y,Math.max(0,f+u.duration-t),f,m),1===y&&p(o)}}v.State.isTicking&&P(c)}function p(e,t){if(!v.State.calls[e])return!1;for(var r=v.State.calls[e][0],n=v.State.calls[e][1],o=v.State.calls[e][2],s=v.State.calls[e][4],l=!1,u=0,c=r.length;c>u;u++){var p=r[u].element;if(t||o.loop||("none"===o.display&&x.setPropertyValue(p,"display",o.display),"hidden"===o.visibility&&x.setPropertyValue(p,"visibility",o.visibility)),o.loop!==!0&&($.queue(p)[1]===a||!/\.velocityQueueEntryFlag/i.test($.queue(p)[1]))&&i(p)){i(p).isAnimating=!1,i(p).rootPropertyValueCache={};var f=!1;$.each(x.Lists.transforms3D,function(e,t){var r=/^scale/.test(t)?1:0,n=i(p).transformCache[t];i(p).transformCache[t]!==a&&new RegExp("^\\("+r+"[^.]").test(n)&&(f=!0,delete i(p).transformCache[t])}),o.mobileHA&&(f=!0,delete 
i(p).transformCache.translate3d),f&&x.flushTransformCache(p),x.Values.removeClass(p,"velocity-animating")}if(!t&&o.complete&&!o.loop&&u===c-1)try{o.complete.call(n,n)}catch(d){setTimeout(function(){throw d},1)}s&&o.loop!==!0&&s(n),i(p)&&o.loop===!0&&!t&&($.each(i(p).tweensContainer,function(e,t){/^rotate/.test(e)&&360===parseFloat(t.endValue)&&(t.endValue=0,t.startValue=360),/^backgroundPosition/.test(e)&&100===parseFloat(t.endValue)&&"%"===t.unitType&&(t.endValue=0,t.startValue=100)}),v(p,"reverse",{loop:!0,delay:o.delay})),o.queue!==!1&&$.dequeue(p,o.queue)}v.State.calls[e]=!1;for(var g=0,m=v.State.calls.length;m>g;g++)if(v.State.calls[g]!==!1){l=!0;break}l===!1&&(v.State.isTicking=!1,delete v.State.calls,v.State.calls=[])}var f=function(){if(r.documentMode)return r.documentMode;for(var e=7;e>4;e--){var t=r.createElement("div");if(t.innerHTML="",t.getElementsByTagName("span").length)return t=null,e}return a}(),d=function(){var e=0;return t.webkitRequestAnimationFrame||t.mozRequestAnimationFrame||function(t){var r=(new Date).getTime(),a;return a=Math.max(0,16-(r-e)),e=r+a,setTimeout(function(){t(r+a)},a)}}(),g={isString:function(e){return"string"==typeof e},isArray:Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)},isFunction:function(e){return"[object Function]"===Object.prototype.toString.call(e)},isNode:function(e){return e&&e.nodeType},isNodeList:function(e){return"object"==typeof e&&/^\[object (HTMLCollection|NodeList|Object)\]$/.test(Object.prototype.toString.call(e))&&e.length!==a&&(0===e.length||"object"==typeof e[0]&&e[0].nodeType>0)},isWrapped:function(e){return e&&(e.jquery||t.Zepto&&t.Zepto.zepto.isZ(e))},isSVG:function(e){return t.SVGElement&&e instanceof t.SVGElement},isEmptyObject:function(e){for(var t in e)return!1;return!0}},$,m=!1;if(e.fn&&e.fn.jquery?($=e,m=!0):$=t.Velocity.Utilities,8>=f&&!m)throw new Error("Velocity: IE8 and below require jQuery to be loaded before Velocity.");if(7>=f)return void(jQuery.fn.velocity=jQuery.fn.animate);var y=400,h="swing",v={State:{isMobile:/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent),isAndroid:/Android/i.test(navigator.userAgent),isGingerbread:/Android 2\.3\.[3-7]/i.test(navigator.userAgent),isChrome:t.chrome,isFirefox:/Firefox/i.test(navigator.userAgent),prefixElement:r.createElement("div"),prefixMatches:{},scrollAnchor:null,scrollPropertyLeft:null,scrollPropertyTop:null,isTicking:!1,calls:[]},CSS:{},Utilities:$,Redirects:{},Easings:{},Promise:t.Promise,defaults:{queue:"",duration:y,easing:h,begin:a,complete:a,progress:a,display:a,visibility:a,loop:!1,delay:!1,mobileHA:!0,_cacheValues:!0},init:function(e){$.data(e,"velocity",{isSVG:g.isSVG(e),isAnimating:!1,computedStyle:null,tweensContainer:null,rootPropertyValueCache:{},transformCache:{}})},hook:null,mock:!1,version:{major:1,minor:2,patch:2},debug:!1};t.pageYOffset!==a?(v.State.scrollAnchor=t,v.State.scrollPropertyLeft="pageXOffset",v.State.scrollPropertyTop="pageYOffset"):(v.State.scrollAnchor=r.documentElement||r.body.parentNode||r.body,v.State.scrollPropertyLeft="scrollLeft",v.State.scrollPropertyTop="scrollTop");var b=function(){function e(e){return-e.tension*e.x-e.friction*e.v}function t(t,r,a){var n={x:t.x+a.dx*r,v:t.v+a.dv*r,tension:t.tension,friction:t.friction};return{dx:n.v,dv:e(n)}}function r(r,a){var n={dx:r.v,dv:e(r)},o=t(r,.5*a,n),i=t(r,.5*a,o),s=t(r,a,i),l=1/6*(n.dx+2*(o.dx+i.dx)+s.dx),u=1/6*(n.dv+2*(o.dv+i.dv)+s.dv);return r.x=r.x+l*a,r.v=r.v+u*a,r}return function a(e,t,n){var 
o={x:-1,v:0,tension:null,friction:null},i=[0],s=0,l=1e-4,u=.016,c,p,f;for(e=parseFloat(e)||500,t=parseFloat(t)||20,n=n||null,o.tension=e,o.friction=t,c=null!==n,c?(s=a(e,t),p=s/n*u):p=u;;)if(f=r(f||o,p),i.push(1+f.x),s+=16,!(Math.abs(f.x)>l&&Math.abs(f.v)>l))break;return c?function(e){return i[e*(i.length-1)|0]}:s}}();v.Easings={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},spring:function(e){return 1-Math.cos(4.5*e*Math.PI)*Math.exp(6*-e)}},$.each([["ease",[.25,.1,.25,1]],["ease-in",[.42,0,1,1]],["ease-out",[0,0,.58,1]],["ease-in-out",[.42,0,.58,1]],["easeInSine",[.47,0,.745,.715]],["easeOutSine",[.39,.575,.565,1]],["easeInOutSine",[.445,.05,.55,.95]],["easeInQuad",[.55,.085,.68,.53]],["easeOutQuad",[.25,.46,.45,.94]],["easeInOutQuad",[.455,.03,.515,.955]],["easeInCubic",[.55,.055,.675,.19]],["easeOutCubic",[.215,.61,.355,1]],["easeInOutCubic",[.645,.045,.355,1]],["easeInQuart",[.895,.03,.685,.22]],["easeOutQuart",[.165,.84,.44,1]],["easeInOutQuart",[.77,0,.175,1]],["easeInQuint",[.755,.05,.855,.06]],["easeOutQuint",[.23,1,.32,1]],["easeInOutQuint",[.86,0,.07,1]],["easeInExpo",[.95,.05,.795,.035]],["easeOutExpo",[.19,1,.22,1]],["easeInOutExpo",[1,0,0,1]],["easeInCirc",[.6,.04,.98,.335]],["easeOutCirc",[.075,.82,.165,1]],["easeInOutCirc",[.785,.135,.15,.86]]],function(e,t){v.Easings[t[0]]=l.apply(null,t[1])});var x=v.CSS={RegEx:{isHex:/^#([A-f\d]{3}){1,2}$/i,valueUnwrap:/^[A-z]+\((.*)\)$/i,wrappedValueAlreadyExtracted:/[0-9.]+ [0-9.]+ [0-9.]+( [0-9.]+)?/,valueSplit:/([A-z]+\(.+\))|(([A-z0-9#-.]+?)(?=\s|$))/gi},Lists:{colors:["fill","stroke","stopColor","color","backgroundColor","borderColor","borderTopColor","borderRightColor","borderBottomColor","borderLeftColor","outlineColor"],transformsBase:["translateX","translateY","scale","scaleX","scaleY","skewX","skewY","rotateZ"],transforms3D:["transformPerspective","translateZ","scaleZ","rotateX","rotateY"]},Hooks:{templates:{textShadow:["Color X Y Blur","black 0px 0px 0px"],boxShadow:["Color X Y Blur Spread","black 0px 0px 0px 0px"],clip:["Top Right Bottom Left","0px 0px 0px 0px"],backgroundPosition:["X Y","0% 0%"],transformOrigin:["X Y Z","50% 50% 0px"],perspectiveOrigin:["X Y","50% 50%"]},registered:{},register:function(){for(var e=0;e=f)switch(e){case"name":return"filter";case"extract":var a=r.toString().match(/alpha\(opacity=(.*)\)/i);return r=a?a[1]/100:1;case"inject":return t.style.zoom=1,parseFloat(r)>=1?"":"alpha(opacity="+parseInt(100*parseFloat(r),10)+")"}else switch(e){case"name":return"opacity";case"extract":return r;case"inject":return r}}},register:function(){9>=f||v.State.isGingerbread||(x.Lists.transformsBase=x.Lists.transformsBase.concat(x.Lists.transforms3D));for(var e=0;en&&(n=1),o=!/(\d)$/i.test(n);break;case"skew":o=!/(deg|\d)$/i.test(n);break;case"rotate":o=!/(deg|\d)$/i.test(n)}return o||(i(r).transformCache[t]="("+n+")"),i(r).transformCache[t]}}}();for(var e=0;e=f||3!==o.split(" ").length||(o+=" 1"),o;case"inject":return 8>=f?4===n.split(" ").length&&(n=n.split(/\s+/).slice(0,3).join(" ")):3===n.split(" ").length&&(n+=" 1"),(8>=f?"rgb":"rgba")+"("+n.replace(/\s+/g,",").replace(/\.(\d)+(?=,)/g,"")+")"}}}()}},Names:{camelCase:function(e){return e.replace(/-(\w)/g,function(e,t){return t.toUpperCase()})},SVGAttribute:function(e){var t="width|height|x|y|cx|cy|r|rx|ry|x1|x2|y1|y2";return(f||v.State.isAndroid&&!v.State.isChrome)&&(t+="|transform"),new RegExp("^("+t+")$","i").test(e)},prefixCheck:function(e){if(v.State.prefixMatches[e])return[v.State.prefixMatches[e],!0];for(var 
t=["","Webkit","Moz","ms","O"],r=0,a=t.length;a>r;r++){var n;if(n=0===r?e:t[r]+e.replace(/^\w/,function(e){return e.toUpperCase()}),g.isString(v.State.prefixElement.style[n]))return v.State.prefixMatches[e]=n,[n,!0]}return[e,!1]}},Values:{hexToRgb:function(e){var t=/^#?([a-f\d])([a-f\d])([a-f\d])$/i,r=/^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i,a;return e=e.replace(t,function(e,t,r,a){return t+t+r+r+a+a}),a=r.exec(e),a?[parseInt(a[1],16),parseInt(a[2],16),parseInt(a[3],16)]:[0,0,0]},isCSSNullValue:function(e){return 0==e||/^(none|auto|transparent|(rgba\(0, ?0, ?0, ?0\)))$/i.test(e)},getUnitType:function(e){return/^(rotate|skew)/i.test(e)?"deg":/(^(scale|scaleX|scaleY|scaleZ|alpha|flexGrow|flexHeight|zIndex|fontWeight)$)|((opacity|red|green|blue|alpha)$)/i.test(e)?"":"px"},getDisplayType:function(e){var t=e&&e.tagName.toString().toLowerCase();return/^(b|big|i|small|tt|abbr|acronym|cite|code|dfn|em|kbd|strong|samp|var|a|bdo|br|img|map|object|q|script|span|sub|sup|button|input|label|select|textarea)$/i.test(t)?"inline":/^(li)$/i.test(t)?"list-item":/^(tr)$/i.test(t)?"table-row":/^(table)$/i.test(t)?"table":/^(tbody)$/i.test(t)?"table-row-group":"block"},addClass:function(e,t){e.classList?e.classList.add(t):e.className+=(e.className.length?" ":"")+t},removeClass:function(e,t){e.classList?e.classList.remove(t):e.className=e.className.toString().replace(new RegExp("(^|\\s)"+t.split(" ").join("|")+"(\\s|$)","gi")," ")}},getPropertyValue:function(e,r,n,o){function s(e,r){function n(){u&&x.setPropertyValue(e,"display","none")}var l=0;if(8>=f)l=$.css(e,r);else{var u=!1;if(/^(width|height)$/.test(r)&&0===x.getPropertyValue(e,"display")&&(u=!0,x.setPropertyValue(e,"display",x.Values.getDisplayType(e))),!o){if("height"===r&&"border-box"!==x.getPropertyValue(e,"boxSizing").toString().toLowerCase()){var c=e.offsetHeight-(parseFloat(x.getPropertyValue(e,"borderTopWidth"))||0)-(parseFloat(x.getPropertyValue(e,"borderBottomWidth"))||0)-(parseFloat(x.getPropertyValue(e,"paddingTop"))||0)-(parseFloat(x.getPropertyValue(e,"paddingBottom"))||0);return n(),c}if("width"===r&&"border-box"!==x.getPropertyValue(e,"boxSizing").toString().toLowerCase()){var p=e.offsetWidth-(parseFloat(x.getPropertyValue(e,"borderLeftWidth"))||0)-(parseFloat(x.getPropertyValue(e,"borderRightWidth"))||0)-(parseFloat(x.getPropertyValue(e,"paddingLeft"))||0)-(parseFloat(x.getPropertyValue(e,"paddingRight"))||0);return n(),p}}var d;d=i(e)===a?t.getComputedStyle(e,null):i(e).computedStyle?i(e).computedStyle:i(e).computedStyle=t.getComputedStyle(e,null),"borderColor"===r&&(r="borderTopColor"),l=9===f&&"filter"===r?d.getPropertyValue(r):d[r],(""===l||null===l)&&(l=e.style[r]),n()}if("auto"===l&&/^(top|right|bottom|left)$/i.test(r)){var g=s(e,"position");("fixed"===g||"absolute"===g&&/top|left/i.test(r))&&(l=$(e).position()[r]+"px")}return l}var l;if(x.Hooks.registered[r]){var u=r,c=x.Hooks.getRoot(u);n===a&&(n=x.getPropertyValue(e,x.Names.prefixCheck(c)[0])),x.Normalizations.registered[c]&&(n=x.Normalizations.registered[c]("extract",e,n)),l=x.Hooks.extractValue(u,n)}else if(x.Normalizations.registered[r]){var p,d;p=x.Normalizations.registered[r]("name",e),"transform"!==p&&(d=s(e,x.Names.prefixCheck(p)[0]),x.Values.isCSSNullValue(d)&&x.Hooks.templates[r]&&(d=x.Hooks.templates[r][1])),l=x.Normalizations.registered[r]("extract",e,d)}if(!/^[\d-]/.test(l))if(i(e)&&i(e).isSVG&&x.Names.SVGAttribute(r))if(/^(height|width)$/i.test(r))try{l=e.getBBox()[r]}catch(g){l=0}else l=e.getAttribute(r);else l=s(e,x.Names.prefixCheck(r)[0]);return 
x.Values.isCSSNullValue(l)&&(l=0),v.debug>=2&&console.log("Get "+r+": "+l),l},setPropertyValue:function(e,r,a,n,o){var s=r;if("scroll"===r)o.container?o.container["scroll"+o.direction]=a:"Left"===o.direction?t.scrollTo(a,o.alternateValue):t.scrollTo(o.alternateValue,a);else if(x.Normalizations.registered[r]&&"transform"===x.Normalizations.registered[r]("name",e))x.Normalizations.registered[r]("inject",e,a),s="transform",a=i(e).transformCache[r];else{if(x.Hooks.registered[r]){var l=r,u=x.Hooks.getRoot(r);n=n||x.getPropertyValue(e,u),a=x.Hooks.injectValue(l,a,n),r=u}if(x.Normalizations.registered[r]&&(a=x.Normalizations.registered[r]("inject",e,a),r=x.Normalizations.registered[r]("name",e)),s=x.Names.prefixCheck(r)[0],8>=f)try{e.style[s]=a}catch(c){v.debug&&console.log("Browser does not support ["+a+"] for ["+s+"]")}else i(e)&&i(e).isSVG&&x.Names.SVGAttribute(r)?e.setAttribute(r,a):e.style[s]=a;v.debug>=2&&console.log("Set "+r+" ("+s+"): "+a)}return[s,a]},flushTransformCache:function(e){function t(t){return parseFloat(x.getPropertyValue(e,t))}var r="";if((f||v.State.isAndroid&&!v.State.isChrome)&&i(e).isSVG){var a={translate:[t("translateX"),t("translateY")],skewX:[t("skewX")],skewY:[t("skewY")],scale:1!==t("scale")?[t("scale"),t("scale")]:[t("scaleX"),t("scaleY")],rotate:[t("rotateZ"),0,0]};$.each(i(e).transformCache,function(e){/^translate/i.test(e)?e="translate":/^scale/i.test(e)?e="scale":/^rotate/i.test(e)&&(e="rotate"),a[e]&&(r+=e+"("+a[e].join(" ")+") ",delete a[e])})}else{var n,o;$.each(i(e).transformCache,function(t){return n=i(e).transformCache[t],"transformPerspective"===t?(o=n,!0):(9===f&&"rotateZ"===t&&(t="rotate"),void(r+=t+n+" "))}),o&&(r="perspective"+o+" "+r)}x.setPropertyValue(e,"transform",r)}};x.Hooks.register(),x.Normalizations.register(),v.hook=function(e,t,r){var n=a;return e=o(e),$.each(e,function(e,o){if(i(o)===a&&v.init(o),r===a)n===a&&(n=v.CSS.getPropertyValue(o,t));else{var s=v.CSS.setPropertyValue(o,t,r);"transform"===s[0]&&v.CSS.flushTransformCache(o),n=s}}),n};var S=function(){function e(){return l?T.promise||null:f}function n(){function e(e){function p(e,t){var r=a,i=a,s=a;return g.isArray(e)?(r=e[0],!g.isArray(e[1])&&/^[\d-]/.test(e[1])||g.isFunction(e[1])||x.RegEx.isHex.test(e[1])?s=e[1]:(g.isString(e[1])&&!x.RegEx.isHex.test(e[1])||g.isArray(e[1]))&&(i=t?e[1]:u(e[1],o.duration),e[2]!==a&&(s=e[2]))):r=e,t||(i=i||o.easing),g.isFunction(r)&&(r=r.call(n,w,P)),g.isFunction(s)&&(s=s.call(n,w,P)),[r||0,i,s]}function f(e,t){var r,a;return a=(t||"0").toString().toLowerCase().replace(/[%A-z]+$/,function(e){return r=e,""}),r||(r=x.Values.getUnitType(e)),[a,r]}function d(){var e={myParent:n.parentNode||r.body,position:x.getPropertyValue(n,"position"),fontSize:x.getPropertyValue(n,"fontSize")},a=e.position===N.lastPosition&&e.myParent===N.lastParent,o=e.fontSize===N.lastFontSize;N.lastParent=e.myParent,N.lastPosition=e.position,N.lastFontSize=e.fontSize;var s=100,l={};if(o&&a)l.emToPx=N.lastEmToPx,l.percentToPxWidth=N.lastPercentToPxWidth,l.percentToPxHeight=N.lastPercentToPxHeight;else{var 
u=i(n).isSVG?r.createElementNS("http://www.w3.org/2000/svg","rect"):r.createElement("div");v.init(u),e.myParent.appendChild(u),$.each(["overflow","overflowX","overflowY"],function(e,t){v.CSS.setPropertyValue(u,t,"hidden")}),v.CSS.setPropertyValue(u,"position",e.position),v.CSS.setPropertyValue(u,"fontSize",e.fontSize),v.CSS.setPropertyValue(u,"boxSizing","content-box"),$.each(["minWidth","maxWidth","width","minHeight","maxHeight","height"],function(e,t){v.CSS.setPropertyValue(u,t,s+"%")}),v.CSS.setPropertyValue(u,"paddingLeft",s+"em"),l.percentToPxWidth=N.lastPercentToPxWidth=(parseFloat(x.getPropertyValue(u,"width",null,!0))||1)/s,l.percentToPxHeight=N.lastPercentToPxHeight=(parseFloat(x.getPropertyValue(u,"height",null,!0))||1)/s,l.emToPx=N.lastEmToPx=(parseFloat(x.getPropertyValue(u,"paddingLeft"))||1)/s,e.myParent.removeChild(u)}return null===N.remToPx&&(N.remToPx=parseFloat(x.getPropertyValue(r.body,"fontSize"))||16),null===N.vwToPx&&(N.vwToPx=parseFloat(t.innerWidth)/100,N.vhToPx=parseFloat(t.innerHeight)/100),l.remToPx=N.remToPx,l.vwToPx=N.vwToPx,l.vhToPx=N.vhToPx,v.debug>=1&&console.log("Unit ratios: "+JSON.stringify(l),n),l}if(o.begin&&0===w)try{o.begin.call(m,m)}catch(y){setTimeout(function(){throw y},1)}if("scroll"===k){var S=/^x$/i.test(o.axis)?"Left":"Top",V=parseFloat(o.offset)||0,C,A,F;o.container?g.isWrapped(o.container)||g.isNode(o.container)?(o.container=o.container[0]||o.container,C=o.container["scroll"+S],F=C+$(n).position()[S.toLowerCase()]+V):o.container=null:(C=v.State.scrollAnchor[v.State["scrollProperty"+S]],A=v.State.scrollAnchor[v.State["scrollProperty"+("Left"===S?"Top":"Left")]],F=$(n).offset()[S.toLowerCase()]+V),s={scroll:{rootPropertyValue:!1,startValue:C,currentValue:C,endValue:F,unitType:"",easing:o.easing,scrollData:{container:o.container,direction:S,alternateValue:A}},element:n},v.debug&&console.log("tweensContainer (scroll): ",s.scroll,n)}else if("reverse"===k){if(!i(n).tweensContainer)return void $.dequeue(n,o.queue);"none"===i(n).opts.display&&(i(n).opts.display="auto"),"hidden"===i(n).opts.visibility&&(i(n).opts.visibility="visible"),i(n).opts.loop=!1,i(n).opts.begin=null,i(n).opts.complete=null,b.easing||delete o.easing,b.duration||delete o.duration,o=$.extend({},i(n).opts,o);var E=$.extend(!0,{},i(n).tweensContainer);for(var j in E)if("element"!==j){var H=E[j].startValue;E[j].startValue=E[j].currentValue=E[j].endValue,E[j].endValue=H,g.isEmptyObject(b)||(E[j].easing=o.easing),v.debug&&console.log("reverse tweensContainer ("+j+"): "+JSON.stringify(E[j]),n)}s=E}else if("start"===k){var E;i(n).tweensContainer&&i(n).isAnimating===!0&&(E=i(n).tweensContainer),$.each(h,function(e,t){if(RegExp("^"+x.Lists.colors.join("$|^")+"$").test(e)){var r=p(t,!0),n=r[0],o=r[1],i=r[2];if(x.RegEx.isHex.test(n)){for(var s=["Red","Green","Blue"],l=x.Values.hexToRgb(n),u=i?x.Values.hexToRgb(i):a,c=0;cO;O++){var z={delay:F.delay,progress:F.progress};O===R-1&&(z.display=F.display,z.visibility=F.visibility,z.complete=F.complete),S(m,"reverse",z)}return e()}};v=$.extend(S,v),v.animate=S;var P=t.requestAnimationFrame||d;return v.State.isMobile||r.hidden===a||r.addEventListener("visibilitychange",function(){r.hidden?(P=function(e){return setTimeout(function(){e(!0)},16)},c()):P=t.requestAnimationFrame||d}),e.Velocity=v,e!==t&&(e.fn.velocity=S,e.fn.velocity.defaults=v.defaults),$.each(["Down","Up"],function(e,t){v.Redirects["slide"+t]=function(e,r,n,o,i,s){var 
l=$.extend({},r),u=l.begin,c=l.complete,p={height:"",marginTop:"",marginBottom:"",paddingTop:"",paddingBottom:""},f={};l.display===a&&(l.display="Down"===t?"inline"===v.CSS.Values.getDisplayType(e)?"inline-block":"block":"none"),l.begin=function(){u&&u.call(i,i);for(var r in p){f[r]=e.style[r];var a=v.CSS.getPropertyValue(e,r);p[r]="Down"===t?[a,0]:[0,a]}f.overflow=e.style.overflow,e.style.overflow="hidden"},l.complete=function(){for(var t in f)e.style[t]=f[t];c&&c.call(i,i),s&&s.resolver(i)},v(e,p,l)}}),$.each(["In","Out"],function(e,t){v.Redirects["fade"+t]=function(e,r,n,o,i,s){var l=$.extend({},r),u={opacity:"In"===t?1:0},c=l.complete;l.complete=n!==o-1?l.begin=null:function(){c&&c.call(i,i),s&&s.resolver(i)},l.display===a&&(l.display="In"===t?"auto":"none"),v(this,u,l)}}),v}(window.jQuery||window.Zepto||window,window,document)}); \ No newline at end of file diff --git a/lib/velocity/velocity.ui.min.js b/lib/velocity/velocity.ui.min.js new file mode 100644 index 0000000000..870694530b --- /dev/null +++ b/lib/velocity/velocity.ui.min.js @@ -0,0 +1,2 @@ +/* VelocityJS.org UI Pack (5.0.4). (C) 2014 Julian Shapiro. MIT @license: en.wikipedia.org/wiki/MIT_License. Portions copyright Daniel Eden, Christian Pucci. */ +!function(t){"function"==typeof require&&"object"==typeof exports?module.exports=t():"function"==typeof define&&define.amd?define(["velocity"],t):t()}(function(){return function(t,a,e,r){function n(t,a){var e=[];return t&&a?($.each([t,a],function(t,a){var r=[];$.each(a,function(t,a){for(;a.toString().length<5;)a="0"+a;r.push(a)}),e.push(r.join(""))}),parseFloat(e[0])>parseFloat(e[1])):!1}if(!t.Velocity||!t.Velocity.Utilities)return void(a.console&&console.log("Velocity UI Pack: Velocity must be loaded first. Aborting."));var i=t.Velocity,$=i.Utilities,s=i.version,o={major:1,minor:1,patch:0};if(n(o,s)){var l="Velocity UI Pack: You need to update Velocity (jquery.velocity.js) to a newer version. Visit http://github.com/julianshapiro/velocity.";throw alert(l),new Error(l)}i.RegisterEffect=i.RegisterUI=function(t,a){function e(t,a,e,r){var n=0,s;$.each(t.nodeType?[t]:t,function(t,a){r&&(e+=t*r),s=a.parentNode,$.each(["height","paddingTop","paddingBottom","marginTop","marginBottom"],function(t,e){n+=parseFloat(i.CSS.getPropertyValue(a,e))})}),i.animate(s,{height:("In"===a?"+":"-")+"="+n},{queue:!1,easing:"ease-in-out",duration:e*("In"===a?.6:1)})}return i.Redirects[t]=function(n,s,o,l,c,u){function f(){s.display!==r&&"none"!==s.display||!/Out$/.test(t)||$.each(c.nodeType?[c]:c,function(t,a){i.CSS.setPropertyValue(a,"display","none")}),s.complete&&s.complete.call(c,c),u&&u.resolver(c||n)}var p=o===l-1;a.defaultDuration="function"==typeof a.defaultDuration?a.defaultDuration.call(c,c):parseFloat(a.defaultDuration);for(var d=0;d1&&($.each(a.reverse(),function(t,e){var r=a[t+1];if(r){var n=e.o||e.options,s=r.o||r.options,o=n&&n.sequenceQueue===!1?"begin":"complete",l=s&&s[o],c={};c[o]=function(){var t=r.e||r.elements,a=t.nodeType?[t]:t;l&&l.call(a,a),i(e)},r.o?r.o=$.extend({},s,c):r.options=$.extend({},s,c)}}),a.reverse()),i(a[0])}}(window.jQuery||window.Zepto||window,window,document)}); \ No newline at end of file diff --git a/page/10/index.html b/page/10/index.html new file mode 100644 index 0000000000..d0ab5b5bff --- /dev/null +++ b/page/10/index.html @@ -0,0 +1,1240 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + dinosaur + + + + + + + + + + + + +

Background

The goal is to get through the following steps:

• Set up Spring Boot
• Use Spring Boot
• Package Spring Boot as a runnable jar

Getting started

Download the Spring Boot starter project; the link is:

https://start.spring.io/

    spring boot download

    +

A working example:

https://start.spring.io/#!type=maven-project&language=java&platformVersion=3.0.0&packaging=jar&jvmVersion=17&groupId=com.example&artifactId=demo&name=demo&description=Demo%20project%20for%20Spring%20Boot&packageName=com.example.demo

Unzip

The download is named demo.zip and needs to be unzipped:

unzip demo.zip

Install Maven

Maven is a build and package management tool for Java.

    +

On Ubuntu, install Maven with the following command:

sudo apt install maven

Add Tomcat

Add the web starter (which brings in embedded Tomcat) to pom.xml:

    <dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    + +

Build a fat jar

Run mvn package; the spring-boot-maven-plugin's repackage goal runs as part of the package phase and produces a runnable fat jar (it can also be invoked directly as mvn spring-boot:repackage):

mvn package

Run the jar

Start it with java -jar ./target/demo-0.0.1-SNAPSHOT.jar:

    $ java -jar ./target/demo-0.0.1-SNAPSHOT.jar 

    . ____ _ __ _ _
    /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \
    ( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \
    \\/ ___)| |_)| | | | | || (_| | ) ) ) )
    ' |____| .__|_| |_|_| |_\__, | / / / /
    =========|_|==============|___/=/_/_/_/
    :: Spring Boot :: (v3.0.0)

    2022-12-09T00:43:52.343+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : Starting DemoApplication v0.0.1-SNAPSHOT using Java 17.0.5 with PID 1459280 (/home/dai/spring/demo/target/demo-0.0.1-SNAPSHOT.jar started by dai in /home/dai/spring/demo)
    2022-12-09T00:43:52.346+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : No active profile set, falling back to 1 default profile: "default"
    2022-12-09T00:43:53.153+08:00 INFO 1459280 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http)
    2022-12-09T00:43:53.162+08:00 INFO 1459280 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat]
    2022-12-09T00:43:53.163+08:00 INFO 1459280 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.1]
    2022-12-09T00:43:53.232+08:00 INFO 1459280 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext
    2022-12-09T00:43:53.234+08:00 INFO 1459280 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 835 ms
    2022-12-09T00:43:53.537+08:00 INFO 1459280 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path ''
    2022-12-09T00:43:53.549+08:00 INFO 1459280 --- [ main] com.example.demo.DemoApplication : Started DemoApplication in 1.522 seconds (process running for 1.859)

    + +
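To verify the packaged jar actually serves requests, a minimal controller can be added before rebuilding. This is my own sketch; the class name and path below are placeholders, not part of the generated demo project:

package com.example.demo;

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Minimal endpoint: rebuild with `mvn package`, restart the jar,
// then open http://localhost:8080/hello in a browser.
@RestController
public class HelloController {

    @GetMapping("/hello")
    public String hello() {
        return "Hello, Spring Boot!";
    }
}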

Background

The goal is to understand how Redis is used from Java.

    +

Redisson

Redisson is a Redis client for Java.

    +

Problems hit when wiring it into Spring Boot

Spring Boot failed at startup with redirection loop detected: the test-environment Redis runs in cluster mode, but the local configuration pointed at a single node, hence the error:

    +
    1
    org.springframework.dao.InvalidDataAccessApiUsageException: MOVED redirection loop detected. Node redis://10.2.26.106:6379 has further redirect to redis://xxx:6379; nested exception is org.redisson.client.RedisException: MOVED redirection loop detected. Node redis://10.2.26.106:6379 has further redirect to redis://xxx:6379
    + +

Solution: switch the configuration to cluster mode, and get/set work normally again.
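For reference, the same cluster setup can also be done programmatically with Redisson (a sketch; the node address is a placeholder, and in a Spring Boot app this would normally live in the configuration file instead):

import org.redisson.Redisson;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public class RedissonClusterDemo {
    public static void main(String[] args) {
        Config config = new Config();
        // cluster mode: list the cluster nodes instead of a single server
        config.useClusterServers()
              .addNodeAddress("redis://127.0.0.1:6379");
        RedissonClient client = Redisson.create(config);
        client.getBucket("greeting").set("hello");       // simple set/get to verify the connection
        System.out.println(client.getBucket("greeting").get());
        client.shutdown();
    }
}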

    +


Background

Understand Java's volatile.

    +

    volatile

The JLS describes volatile in two roles:

• a keyword
• a field modifier

Purpose of volatile

The detailed description is in JLS §8.3.1.4, volatile Fields:

    The Java programming language allows threads to access shared variables (§17.1).
    As a rule, to ensure that shared variables are consistently and reliably updated, a
    thread should ensure that it has exclusive use of such variables by obtaining a lock
    that, conventionally, enforces mutual exclusion for those shared variables.
    The Java programming language provides a second mechanism, volatile fields,
    that is more convenient than locking for some purposes
    + +

In plain terms: in Java a variable may be shared between threads. To keep shared variables consistently and reliably updated, a thread normally has to obtain a lock that enforces mutual exclusion on those variables. Besides locking, Java provides a second mechanism, volatile fields, which for some purposes is more convenient than a lock.
    + + +

    visibility, ordering and atomicity

When synchronization is wrong, three kinds of problems show up (a small visibility sketch follows the list):

• visibility
• ordering
• atomicity
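As a quick illustration of the visibility problem (my own sketch, not from the JSR-133 text): without volatile, the worker thread may never observe the write from main and can spin forever; marking the flag volatile guarantees the update becomes visible.

class VisibilityDemo {
    // try removing volatile here to reproduce the visibility problem
    static volatile boolean running = true;

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            while (running) {
                // busy-wait until main clears the flag
            }
            System.out.println("worker stopped");
        });
        worker.start();
        Thread.sleep(1000);
        running = false;   // visible to the worker because the field is volatile
    }
}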

JSR-133 names the three types of problems that can appear when a program is not correctly synchronized; the original wording:

    If a program is not correctly synchronized, then three types of problems can appear:
    visibility, ordering and atomicity.
    + +

Ordering

Below is the example given in JSR-133:

    +

There are two threads, threadOne and threadTwo:

    class BadlyOrdered {
    boolean a = false;
    boolean b = false;

    void threadOne() {
    a = true;
    b = true;
    }

    boolean threadTwo() {
    boolean r1 = b; // sees true
    boolean r2 = a; // sees false
    return r1 && !r2; // returns true
    }
    }
    + + +
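For comparison, a sketch of the same class with both fields declared volatile: the volatile write to b happens-before the read that sees b == true, so the following read of a must also see true, and threadTwo can no longer return true.

class ProperlyOrdered {
    volatile boolean a = false;
    volatile boolean b = false;

    void threadOne() {
        a = true;
        b = true;          // volatile write: also publishes the earlier write to a
    }

    boolean threadTwo() {
        boolean r1 = b;    // volatile read
        boolean r2 = a;    // guaranteed true whenever r1 is true
        return r1 && !r2;  // always false
    }
}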

I also found a C++ demo of CPU instruction reordering online (source linked in the original post).

    +

Save it as reorder.cpp:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    // Set either of these to 1 to prevent CPU reordering
    #define USE_CPU_FENCE 0
    #define USE_SINGLE_HW_THREAD 0 // Supported on Linux, but not Cygwin or PS3

    #if USE_SINGLE_HW_THREAD
    #include <sched.h>
    #endif


    //-------------------------------------
    // MersenneTwister
    // A thread-safe random number generator with good randomness
    // in a small number of instructions. We'll use it to introduce
    // random timing delays.
    //-------------------------------------
    #define MT_IA 397
    #define MT_LEN 624

    class MersenneTwister
    {
    unsigned int m_buffer[MT_LEN];
    int m_index;

    public:
    MersenneTwister(unsigned int seed);
    // Declare noinline so that the function call acts as a compiler barrier:
    unsigned int integer() __attribute__((noinline));
    };

    MersenneTwister::MersenneTwister(unsigned int seed)
    {
    // Initialize by filling with the seed, then iterating
    // the algorithm a bunch of times to shuffle things up.
    for (int i = 0; i < MT_LEN; i++)
    m_buffer[i] = seed;
    m_index = 0;
    for (int i = 0; i < MT_LEN * 100; i++)
    integer();
    }

    unsigned int MersenneTwister::integer()
    {
    // Indices
    int i = m_index;
    int i2 = m_index + 1; if (i2 >= MT_LEN) i2 = 0; // wrap-around
    int j = m_index + MT_IA; if (j >= MT_LEN) j -= MT_LEN; // wrap-around

    // Twist
    unsigned int s = (m_buffer[i] & 0x80000000) | (m_buffer[i2] & 0x7fffffff);
    unsigned int r = m_buffer[j] ^ (s >> 1) ^ ((s & 1) * 0x9908B0DF);
    m_buffer[m_index] = r;
    m_index = i2;

    // Swizzle
    r ^= (r >> 11);
    r ^= (r << 7) & 0x9d2c5680UL;
    r ^= (r << 15) & 0xefc60000UL;
    r ^= (r >> 18);
    return r;
    }


    //-------------------------------------
// Main program, as described in the post
    //-------------------------------------
    sem_t beginSema1;
    sem_t beginSema2;
    sem_t endSema;

    int X, Y;
    int r1, r2;

    void *thread1Func(void *param)
    {
    MersenneTwister random(1);
    for (;;)
    {
    sem_wait(&beginSema1); // Wait for signal
    while (random.integer() % 8 != 0) {} // Random delay

    // ----- THE TRANSACTION! -----
    X = 1;
    #if USE_CPU_FENCE
    asm volatile("mfence" ::: "memory"); // Prevent CPU reordering
    #else
    asm volatile("" ::: "memory"); // Prevent compiler reordering
    #endif
    r1 = Y;

    sem_post(&endSema); // Notify transaction complete
    }
    return NULL; // Never returns
    };

    void *thread2Func(void *param)
    {
    MersenneTwister random(2);
    for (;;)
    {
    sem_wait(&beginSema2); // Wait for signal
    while (random.integer() % 8 != 0) {} // Random delay

    // ----- THE TRANSACTION! -----
    Y = 1;
    #if USE_CPU_FENCE
    asm volatile("mfence" ::: "memory"); // Prevent CPU reordering
    #else
    asm volatile("" ::: "memory"); // Prevent compiler reordering
    #endif
    r2 = X;

    sem_post(&endSema); // Notify transaction complete
    }
    return NULL; // Never returns
    };

    int main()
    {
    // Initialize the semaphores
    sem_init(&beginSema1, 0, 0);
    sem_init(&beginSema2, 0, 0);
    sem_init(&endSema, 0, 0);

    // Spawn the threads
    pthread_t thread1, thread2;
    pthread_create(&thread1, NULL, thread1Func, NULL);
    pthread_create(&thread2, NULL, thread2Func, NULL);

    #if USE_SINGLE_HW_THREAD
    // Force thread affinities to the same cpu core.
    cpu_set_t cpus;
    CPU_ZERO(&cpus);
    CPU_SET(0, &cpus);
    pthread_setaffinity_np(thread1, sizeof(cpu_set_t), &cpus);
    pthread_setaffinity_np(thread2, sizeof(cpu_set_t), &cpus);
    #endif

    // Repeat the experiment ad infinitum
    int detected = 0;
    for (int iterations = 1; ; iterations++)
    {
    // Reset X and Y
    X = 0;
    Y = 0;
    // Signal both threads
    sem_post(&beginSema1);
    sem_post(&beginSema2);
    // Wait for both threads
    sem_wait(&endSema);
    sem_wait(&endSema);
    // Check if there was a simultaneous reorder
    if (r1 == 0 && r2 == 0)
    {
    detected++;
    printf("%d reorders detected after %d iterations\n", detected, iterations);
    }
    }
    return 0; // Never returns
    }

    + + +
## then compile
gcc -O2 reorder.cpp -o reorder

## run it
./reorder
    + + + + + + + + +


Background

Hit the error: This is not a valid name for a Kubernetes node, causing node registration to fail.

    +

Steps to fix

Inspect the error:

    +
    1
    $ microk8s inspect
    + + +
    WARNING:  This machine's hostname contains capital letters and/or underscores. 
    This is not a valid name for a Kubernetes node, causing node registration to fail.
    Please change the machine's hostname or refer to the documentation for more details:
    + + +

The cause of the error: the hostname is not valid.

    +

How do you check your own hostname?

    +

Use the hostname or hostnamectl command:

    +
    $ hostname
    dai-MS-7B89
    + +
    $ hostnamectl
    Static hostname: dai-MS-7B89
    Icon name: computer-desktop
    Chassis: desktop
    Machine ID: d55c62a250474c459bda9aecc21307a7
    Boot ID: e97354108f364004a0775ce12cc57d98
    Operating System: Ubuntu 22.04 LTS
    Kernel: Linux 5.15.0-56-generic
    Architecture: x86-64
    Hardware Vendor: Micro-Star International Co., Ltd.
    Hardware Model: MS-7B89
    + +

My current hostname contains capital letters as well as a hyphen.

    +

Change it to myhost:

    +
    1
    sudo hostnamectl set-hostname  myhost
    + +


Background

Understand how Java signatures (descriptors) work.

    +

Viewing signatures with javap

    javap -s java.lang.String
    Compiled from "String.java"
    public final class java.lang.String implements java.io.Serializable, java.lang.Comparable<java.lang.String>, java.lang.CharSequence {
    static final boolean COMPACT_STRINGS;
    descriptor: Z
    public static final java.util.Comparator<java.lang.String> CASE_INSENSITIVE_ORDER;
    descriptor: Ljava/util/Comparator;
    static final byte LATIN1;
    descriptor: B
    static final byte UTF16;
    descriptor: B
    public java.lang.String();
    descriptor: ()V



    public java.lang.String(byte[], int, int, int);
    descriptor: ([BIII)V

    public java.lang.String(byte[], int);
    descriptor: ([BI)V

    public java.lang.String(byte[], int, int, java.lang.String) throws java.io.UnsupportedEncodingException;
    descriptor: ([BIILjava/lang/String;)V


    }
    + + +
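The descriptor column uses the JVM's internal type encoding: Z is boolean, B is byte, I is int, V is void, [ marks an array, and Lpkg/Class; marks an object type. A tiny class to cross-check this against javap -s output (my own sketch, not from the post):

public class Descriptors {
    static boolean flag;        // descriptor: Z
    static byte b;              // descriptor: B
    static int[] ints;          // descriptor: [I
    static String name;         // descriptor: Ljava/lang/String;

    static int sum(int x, int y) {            // descriptor: (II)I
        return x + y;
    }

    public static void main(String[] args) {  // descriptor: ([Ljava/lang/String;)V
    }
}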


Background

Understand the Tomcat lifecycle and the lifecycle of a single HTTP request.

    +

Steps

• 1. Pull the Tomcat source code:
  git clone https://github.com/apache/tomcat

The directories used by the build are configured in the ${tomcat.source}/build.properties file.

• 2. Install ant

I'm on Ubuntu, so I install it straight from the package manager:

    +
    sudo apt install ant

    + +
• 3. Copy build.properties into place and adjust it; it holds the build properties, including the download paths used by the build:
  cp build.properties.default  build.properties

We want debug symbols to be generated at compile time; for a plain Java program that means javac -g, i.e. adding the flag when javac is invoked.
In Tomcat's build.xml it is easy to spot the compile.debug property:

    +
    <javac srcdir="java" destdir="${tomcat.classes}"
debug="${compile.debug}" <!-- the compile.debug property is referenced here -->
    deprecation="${compile.deprecation}"
    release="${compile.release}"
    encoding="ISO-8859-1"
    includeAntRuntime="true" >
    <!-- Uncomment this to show unchecked warnings:
    <compilerarg value="-Xlint:unchecked"/>
    +

So where is this property set?
Open build.properties and you'll see it: a freshly cloned Tomcat already has the -g option enabled, so nothing needs to change:

    +
    # ----- Build control flags -----
    compile.debug=true
    + + +
• 4. Run the build command:
    ant
    + +

The build output lands in the directory below, where source_code is your checkout:

    +
    1
    {source_code}/output/build/bin
    + +

Change into {source_code}/output/build/bin and start Tomcat:

    +
    ./startup.sh 
    Using CATALINA_BASE: /home/dai/tomcat/output/build
    Using CATALINA_HOME: /home/dai/tomcat/output/build
    Using CATALINA_TMPDIR: /home/dai/tomcat/output/build/temp
    Using JRE_HOME: /usr
    Using CLASSPATH: /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar
    Using CATALINA_OPTS:
    Tomcat started.

    + +

Looking at the running Tomcat process, the full command line is:

    +
    ps aux | grep tomcat


    /usr/bin/java -Djava.util.logging.config.file=/home/dai/tomcat/output/build/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Djdk.tls.ephemeralDHKeySize=2048 -Djava.protocol.handler.pkgs=org.apache.catalina.webresources -Dorg.apache.catalina.security.SecurityListener.UMASK=0027 --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED -classpath /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar -Dcatalina.base=/home/dai/tomcat/output/build -Dcatalina.home=/home/dai/tomcat/output/build -Djava.io.tmpdir=/home/dai/tomcat/output/build/temp org.apache.catalina.startup.Bootstrap start


    + + +

Send a request to Tomcat

Open http://127.0.0.1:8080/ in a browser.

    +

    debug tomcat

Break on the main function

Take the command line obtained above with ps aux | grep "tomcat", and insert the option -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000 right after /usr/bin/java, so the JVM suspends before reaching main.

    +

Option breakdown:

• suspend: whether the JVM pauses until a debugger attaches
• address: the listen address; a bare port is enough, here port 8000
• transport: the debug transport; locally a socket is the usual choice (shared memory also works), here dt_socket
    /usr/bin/java -agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=8000   -Djava.util.logging.config.file=/home/dai/tomcat/output/build/conf/logging.properties -Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager -Djdk.tls.ephemeralDHKeySize=2048 -Djava.protocol.handler.pkgs=org.apache.catalina.webresources -Dorg.apache.catalina.security.SecurityListener.UMASK=0027 --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED -classpath /home/dai/tomcat/output/build/bin/bootstrap.jar:/home/dai/tomcat/output/build/bin/tomcat-juli.jar -Dcatalina.base=/home/dai/tomcat/output/build -Dcatalina.home=/home/dai/tomcat/output/build -Djava.io.tmpdir=/home/dai/tomcat/output/build/temp org.apache.catalina.startup.Bootstrap start

    + +

Attach jdb and start debugging

    1
    jdb  -attach  8000 -sourcepath /home/dai/tomcat/java/
    + + +

### Set a breakpoint at org.apache.catalina.startup.Bootstrap.main
    main[1] stop in org.apache.catalina.startup.Bootstrap.main
    Deferring breakpoint org.apache.catalina.startup.Bootstrap.main.
    It will be set after the class is loaded.
#### Start execution with run
    main[1] run

    + + +

Use list to show the source; execution is now stopped at the breakpoint in main:

    +
    Breakpoint hit: "thread=main", org.apache.catalina.startup.Bootstrap.main(), line=442 bci=0
    442 synchronized (daemonLock) {

    main[1] list
    438 * @param args Command line arguments to be processed
    439 */
    440 public static void main(String args[]) {
    441
    442 => synchronized (daemonLock) {
    443 if (daemon == null) {
    444 // Don't set daemon until init() has completed
    445 Bootstrap bootstrap = new Bootstrap();
    446 try {
    447 bootstrap.init();
    main[1]
    + + +

Servlet request path

    http-nio-8080-exec-1[1] where
    [1] HelloWorldExample.doGet (HelloWorldExample.java:41)
    [2] jakarta.servlet.http.HttpServlet.service (HttpServlet.java:705)
    [3] jakarta.servlet.http.HttpServlet.service (HttpServlet.java:814)
    [4] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:223)
    [5] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
    [6] org.apache.tomcat.websocket.server.WsFilter.doFilter (WsFilter.java:53)
    [7] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:185)
    [8] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
    [9] org.apache.catalina.filters.HttpHeaderSecurityFilter.doFilter (HttpHeaderSecurityFilter.java:126)
    [10] org.apache.catalina.core.ApplicationFilterChain.internalDoFilter (ApplicationFilterChain.java:185)
    [11] org.apache.catalina.core.ApplicationFilterChain.doFilter (ApplicationFilterChain.java:158)
    [12] org.apache.catalina.core.StandardWrapperValve.invoke (StandardWrapperValve.java:177)
    [13] org.apache.catalina.core.StandardContextValve.invoke (StandardContextValve.java:97)
    [14] org.apache.catalina.authenticator.AuthenticatorBase.invoke (AuthenticatorBase.java:542)
    [15] org.apache.catalina.core.StandardHostValve.invoke (StandardHostValve.java:119)
    [16] org.apache.catalina.valves.ErrorReportValve.invoke (ErrorReportValve.java:92)
    [17] org.apache.catalina.valves.AbstractAccessLogValve.invoke (AbstractAccessLogValve.java:690)
    [18] org.apache.catalina.core.StandardEngineValve.invoke (StandardEngineValve.java:78)
    [19] org.apache.catalina.connector.CoyoteAdapter.service (CoyoteAdapter.java:357)
    [20] org.apache.coyote.http11.Http11Processor.service (Http11Processor.java:400)
    [21] org.apache.coyote.AbstractProcessorLight.process (AbstractProcessorLight.java:65)
    [22] org.apache.coyote.AbstractProtocol$ConnectionHandler.process (AbstractProtocol.java:859)
    [23] org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun (NioEndpoint.java:1,734)
    [24] org.apache.tomcat.util.net.SocketProcessorBase.run (SocketProcessorBase.java:52)
    [25] org.apache.tomcat.util.threads.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,191)
    [26] org.apache.tomcat.util.threads.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:659)
    [27] org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run (TaskThread.java:61)
    [28] java.lang.Thread.run (Thread.java:833)
    + + +
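The frame at the top, HelloWorldExample.doGet, is one of Tomcat's bundled example servlets; the container walks the valve and filter chain above and finally calls doGet. Roughly, a servlet of that shape looks like the sketch below (my own sketch, not the actual example source):

import java.io.IOException;
import java.io.PrintWriter;
import jakarta.servlet.http.HttpServlet;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;

public class HelloWorldExample extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response)
            throws IOException {
        response.setContentType("text/html");
        PrintWriter out = response.getWriter();   // the body is written back through the connector
        out.println("<h1>Hello World!</h1>");
    }
}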


Background

assert is a function in glibc. Java has a similar facility: assertions are a quick way to surface problems in a test environment.
In Java, assert is a Statement, i.e. a language-level statement.
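A minimal example of the assert statement (my own sketch): note that assertions are disabled by default and must be switched on with java -ea (or -enableassertions) at runtime.

public class AssertDemo {
    static int divide(int a, int b) {
        // throws AssertionError with this message when run with -ea and b == 0
        assert b != 0 : "divisor must not be zero";
        return a / b;
    }

    public static void main(String[] args) {
        System.out.println(divide(10, 2));   // prints 5
        System.out.println(divide(10, 0));   // AssertionError under -ea, ArithmeticException otherwise
    }
}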


Background

Understand the lifecycle of an HTTP request in a Spring Boot application.

    +

Tomcat startup

    initialize:108, TomcatWebServer (org.springframework.boot.web.embedded.tomcat)
    <init>:104, TomcatWebServer (org.springframework.boot.web.embedded.tomcat)
    getTomcatWebServer:440, TomcatServletWebServerFactory (org.springframework.boot.web.embedded.tomcat)
    getWebServer:193, TomcatServletWebServerFactory (org.springframework.boot.web.embedded.tomcat)
    createWebServer:178, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
    onRefresh:158, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
    refresh:545, AbstractApplicationContext (org.springframework.context.support)
    refresh:143, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    main:22, Application
    + +

Stack trace when a controller method is invoked:

    getLanguagesAllList:35, CommonController (com.patpat.mms.mdp.base.core.rest.controller)
    invoke:-1, CommonController$$FastClassBySpringCGLIB$$2cf69542 (com.patpat.mms.mdp.base.core.rest.controller)
    invoke:218, MethodProxy (org.springframework.cglib.proxy)
    invokeJoinpoint:779, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
    proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
    proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
    proceed:88, MethodInvocationProceedingJoinPoint (org.springframework.aop.aspectj)
    doAround:27, AbstractLogAspect (com.patpat.marketing.common.aspect.log)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    invokeAdviceMethodWithGivenArgs:644, AbstractAspectJAdvice (org.springframework.aop.aspectj)
    invokeAdviceMethod:633, AbstractAspectJAdvice (org.springframework.aop.aspectj)
    invoke:70, AspectJAroundAdvice (org.springframework.aop.aspectj)
    proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
    proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
    invoke:95, ExposeInvocationInterceptor (org.springframework.aop.interceptor)
    proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
    proceed:750, CglibAopProxy$CglibMethodInvocation (org.springframework.aop.framework)
    intercept:692, CglibAopProxy$DynamicAdvisedInterceptor (org.springframework.aop.framework)
    getLanguagesAllList:-1, CommonController$$EnhancerBySpringCGLIB$$cea69971 (com.patpat.mms.mdp.base.core.rest.controller)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    doInvoke:190, InvocableHandlerMethod (org.springframework.web.method.support)
    invokeForRequest:138, InvocableHandlerMethod (org.springframework.web.method.support)
    invokeAndHandle:105, ServletInvocableHandlerMethod (org.springframework.web.servlet.mvc.method.annotation)
    invokeHandlerMethod:878, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)
    handleInternal:792, RequestMappingHandlerAdapter (org.springframework.web.servlet.mvc.method.annotation)
    handle:87, AbstractHandlerMethodAdapter (org.springframework.web.servlet.mvc.method)
    doDispatch:1040, DispatcherServlet (org.springframework.web.servlet)
    doService:943, DispatcherServlet (org.springframework.web.servlet)
    processRequest:1006, FrameworkServlet (org.springframework.web.servlet)
    doPost:909, FrameworkServlet (org.springframework.web.servlet)
    service:652, HttpServlet (javax.servlet.http)
    service:883, FrameworkServlet (org.springframework.web.servlet)
    service:733, HttpServlet (javax.servlet.http)
    internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    invoke:202, StandardWrapperValve (org.apache.catalina.core)
    invoke:97, StandardContextValve (org.apache.catalina.core)
    invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
    invoke:143, StandardHostValve (org.apache.catalina.core)
    invoke:92, ErrorReportValve (org.apache.catalina.valves)
    invoke:78, StandardEngineValve (org.apache.catalina.core)
    service:357, CoyoteAdapter (org.apache.catalina.connector)
    service:374, Http11Processor (org.apache.coyote.http11)
    process:65, AbstractProcessorLight (org.apache.coyote)
    process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
    doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
    run:49, SocketProcessorBase (org.apache.tomcat.util.net)
    runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
    run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
    run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
    run:834, Thread (java.lang)
    +

Dispatcher servlet

Stack trace:

    doService:911, DispatcherServlet (org.springframework.web.servlet)
    processRequest:1006, FrameworkServlet (org.springframework.web.servlet)
    doPost:909, FrameworkServlet (org.springframework.web.servlet)
    service:652, HttpServlet (javax.servlet.http)
    service:883, FrameworkServlet (org.springframework.web.servlet)
    service:733, HttpServlet (javax.servlet.http)
    internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    invoke:202, StandardWrapperValve (org.apache.catalina.core)
    invoke:97, StandardContextValve (org.apache.catalina.core)
    invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
    invoke:143, StandardHostValve (org.apache.catalina.core)
    invoke:92, ErrorReportValve (org.apache.catalina.valves)
    invoke:78, StandardEngineValve (org.apache.catalina.core)
    service:357, CoyoteAdapter (org.apache.catalina.connector)
    service:374, Http11Processor (org.apache.coyote.http11)
    process:65, AbstractProcessorLight (org.apache.coyote)
    process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
    doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
    run:49, SocketProcessorBase (org.apache.tomcat.util.net)
    runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
    run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
    run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
    run:834, Thread (java.lang)
    + +


Background

• Understand the Java compilation process
• Invoke javac through the Java module system

The Java compile command

Starting with JDK 9, Java has a module system, and the JDK's own internal libraries are split into modules.

    +

The compiler front end is the javac command; under the hood it invokes the class com.sun.tools.javac.Main in the jdk.compiler module.

    +

In other words, running javac is equivalent to running java --module jdk.compiler/com.sun.tools.javac.Main.
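The same compiler can also be reached from code through the standard javax.tools API, which is another way to see that the compiler is just an ordinary part of the JDK (a sketch; the source path is an example):

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public class CompileDemo {
    public static void main(String[] args) {
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        // run(in, out, err, arguments...) returns 0 on success, just like javac
        int result = compiler.run(null, null, null, "com/Hello.java");
        System.out.println(result == 0 ? "compiled" : "failed: " + result);
    }
}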

    +

The Java front end

List all modules:

    +

    $ java --list-modules
    ...
    jdk.attach@17.0.5
    jdk.charsets@17.0.5
    jdk.compiler@17.0.5
    ...

    + +

My JDK is currently JDK 17; the list contains a jdk.compiler module, and that is the Java compiler front end.

    +

Invoking it via the module

First create a hello world:

    +
    1
    vim com/Hello.java
    +

With the following content:

    +
    package com;
    public class Hello{
    public static void main(String [] argc){

    System.out.print(argc.length);

    }
    }
    + +

Then compile it with:

    +
    1
    java  --module   jdk.compiler/com.sun.tools.javac.Main  com/Hello.java
    + +

The class file is produced:

    +
    $tree com
    com
    ├── Hello.class
    └── Hello.java
    + + +

Run the hello world demo:

    +
    $ java com.Hello
    0
    + +

It runs as expected.

    +

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/page/11/index.html b/page/11/index.html new file mode 100644 index 0000000000..cb1f31c8f8 --- /dev/null +++ b/page/11/index.html @@ -0,0 +1,1137 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + dinosaur + + + + + + + + + + + + +

Background

In production, writes to ClickHouse hit the error xid equal to close_xid; it was reported once and subsequent writes were fine.

    +
    1
    returned error: Code: 999, e.displayText() = DB::Exception: Cannot allocate block number in ZooKeeper: Coordination::Exception: xid equal to close_xid (Session expired) 
    + + +

Investigation

Looking at the PR (added in 2019), an exception is thrown when the xid reaches 0xffffffff, to avoid a deadlock.
On the next request after the exception, ClickHouse reconnects to ZooKeeper and resets the xid, so the connection is healthy again.

    +


Background

The java command can often be made to print the disassembly of JIT-compiled code.

    +

Relevant flags

JVM flags: HotSpot options (with an -XX: prefix on the command line).

    +

The key flag is -XX:+PrintAssembly, which dumps the disassembly of the corresponding compiled code.

    +
    1
    ./java   -Xcomp -XX:+UnlockDiagnosticVMOptions -XX:+PrintAssembly -XX:CompileCommand=compileonly,*com.Hello::testIncr  com.Hello  >> test.txt
    + +
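The post does not show the com.Hello class itself; from the OptoAssembly output further down ("Field: volatile com/Hello.i" plus an incl), its shape is roughly the sketch below. Treat it as a guess, not the original source:

package com;

public class Hello {
    static volatile int i;

    static void testIncr() {
        i++;                      // volatile load, increment, volatile store
    }

    public static void main(String[] args) {
        for (int n = 0; n < 1_000_000; n++) {
            testIncr();           // keep the method hot so the JIT compiles it
        }
    }
}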

How the flag is parsed

    Thread 2 "java" hit Hardware watchpoint 19: PrintAssembly

    Old value = false
    New value = true
    JVMFlag::write<bool> (this=0x7f1814bcf140 <flagTable+17600>, value=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlag.hpp:237
    237 }
    (gdb) bt
    #0 JVMFlag::write<bool> (this=0x7f1814bcf140 <flagTable+17600>, value=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlag.hpp:237
    #1 0x00007f18137cf984 in TypedFlagAccessImpl<bool, EventBooleanFlagChanged>::check_constraint_and_set (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value_addr=0x7f18129e7f54,
    origin=JVMFlagOrigin::COMMAND_LINE, verbose=true) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:75
    #2 0x00007f18137ce655 in FlagAccessImpl_bool::set_impl (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value_addr=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE)
    at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:94
    #3 0x00007f18137ce576 in FlagAccessImpl::set (this=0x7f1814bd49c0 <flag_access_bool>, flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE)
    at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:49
    #4 0x00007f18137cc5db in JVMFlagAccess::set_impl (flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.cpp:299
    #5 0x00007f181306cdf9 in JVMFlagAccess::set<bool, 0> (flag=0x7f1814bcf140 <flagTable+17600>, value=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp:120
    #6 0x00007f181306bd96 in JVMFlagAccess::set_bool (f=0x7f1814bcf140 <flagTable+17600>, v=0x7f18129e7f54, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/flags/jvmFlagAccess.hpp:133
    #7 0x00007f1813060002 in set_bool_flag (flag=0x7f1814bcf140 <flagTable+17600>, value=true, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:825
    #8 0x00007f18130607a9 in Arguments::parse_argument (arg=0x55dcf2d093a4 "+PrintAssembly", origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:993
    #9 0x00007f18130611be in Arguments::process_argument (arg=0x55dcf2d093a4 "+PrintAssembly", ignore_unrecognized=0 '\000', origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:1179
    #10 0x00007f1813066867 in Arguments::parse_each_vm_init_arg (args=0x7f18129e8d50, patch_mod_javabase=0x7f18129e87fb, origin=JVMFlagOrigin::COMMAND_LINE) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:2972
    #11 0x00007f18130639da in Arguments::parse_vm_init_args (vm_options_args=0x7f18129e8878, java_tool_options_args=0x7f18129e88b8, java_options_args=0x7f18129e88f8, cmd_line_args=0x7f18129e8d50)
    at /var/jdk/src/hotspot/share/runtime/arguments.cpp:2174
    #12 0x00007f1813068a46 in Arguments::parse (initial_cmd_args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/runtime/arguments.cpp:3946
    #13 0x00007f1813e5def7 in Threads::create_vm (args=0x7f18129e8d50, canTryAgain=0x7f18129e8c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2734
    #14 0x00007f181378343b in JNI_CreateJavaVM_inner (vm=0x7f18129e8da8, penv=0x7f18129e8db0, args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
    #15 0x00007f1813783787 in JNI_CreateJavaVM (vm=0x7f18129e8da8, penv=0x7f18129e8db0, args=0x7f18129e8d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
    #16 0x00007f1814efaa6a in InitializeJVM (pvm=0x7f18129e8da8, penv=0x7f18129e8db0, ifn=0x7f18129e8e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
    #17 0x00007f1814ef75ec in JavaMain (_args=0x7ffc68186870) at /var/jdk/src/java.base/share/native/libjli/java.c:411
    #18 0x00007f1814efe5ec in ThreadJavaMain (args=0x7ffc68186870) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #19 0x00007f1814d59b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    + + + +

Stack trace:

    +
    #0  Compile::Compile (this=0x7f5eb0b5ea80, ci_env=0x7f5eb0b5f6b0, generator=0x7f5ecfee55f2 <OptoRuntime::new_instance_Type()>, 
    stub_function=0x7f5ecfee3d90 <OptoRuntime::new_instance_C(Klass*, JavaThread*)> "\363\017\036\372UH\211\345H\203\354`H\211}\250H\211u\240\350[2$\377H9E\240\017\225\300\204\300t?H\215\005m\241\364",
    stub_name=0x7f5ed074e78a "_new_instance_Java", is_fancy_jump=0, pass_tls=true, return_pc=false, directive=0x7f5ec822a050) at /var/jdk/src/hotspot/share/opto/compile.cpp:892
    #1 0x00007f5ecfee3c98 in OptoRuntime::generate_stub (env=0x7f5eb0b5f6b0, gen=0x7f5ecfee55f2 <OptoRuntime::new_instance_Type()>,
    C_function=0x7f5ecfee3d90 <OptoRuntime::new_instance_C(Klass*, JavaThread*)> "\363\017\036\372UH\211\345H\203\354`H\211}\250H\211u\240\350[2$\377H9E\240\017\225\300\204\300t?H\215\005m\241\364", name=0x7f5ed074e78a "_new_instance_Java",
    is_fancy_jump=0, pass_tls=true, return_pc=false) at /var/jdk/src/hotspot/share/opto/runtime.cpp:171
    #2 0x00007f5ecfee374d in OptoRuntime::generate (env=0x7f5eb0b5f6b0) at /var/jdk/src/hotspot/share/opto/runtime.cpp:139
    #3 0x00007f5ecf48ab83 in C2Compiler::init_c2_runtime () at /var/jdk/src/hotspot/share/opto/c2compiler.cpp:78
    #4 0x00007f5ecf48ac07 in C2Compiler::initialize (this=0x7f5ec8342980) at /var/jdk/src/hotspot/share/opto/c2compiler.cpp:91
    #5 0x00007f5ecf5c2ab2 in CompileBroker::init_compiler_runtime () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1782
    #6 0x00007f5ecf5c3046 in CompileBroker::compiler_thread_loop () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1919
    #7 0x00007f5ecf5e5462 in CompilerThread::thread_entry (thread=0x7f5ec8343060, __the_thread__=0x7f5ec8343060) at /var/jdk/src/hotspot/share/compiler/compilerThread.cpp:59
    #8 0x00007f5ed00c0009 in JavaThread::thread_main_inner (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1297
    #9 0x00007f5ed00bfe92 in JavaThread::run (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1280
    #10 0x00007f5ed00bd57f in Thread::call_run (this=0x7f5ec8343060) at /var/jdk/src/hotspot/share/runtime/thread.cpp:358
    #11 0x00007f5ecfe041e7 in thread_native_entry (thread=0x7f5ec8343060) at /var/jdk/src/hotspot/os/linux/os_linux.cpp:705
    #12 0x00007f5ed0fc0b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #13 0x00007f5ed1051bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
    (gdb) b Compile::Compile
    + + +
    (gdb) bt
    #0 nmethod::print (this=0x7fd21d591010, st=0x7fd22c000b80) at /var/jdk/src/hotspot/share/code/nmethod.cpp:2518
    #1 0x00007fd23498cf10 in nmethod::decode2 (this=0x7fd21d591010, ost=0x7fd22c000b80) at /var/jdk/src/hotspot/share/code/nmethod.cpp:2887
    #2 0x00007fd234985e16 in nmethod::print_nmethod (this=0x7fd21d591010, printmethod=true) at /var/jdk/src/hotspot/share/code/nmethod.cpp:962
    #3 0x00007fd234985c95 in nmethod::maybe_print_nmethod (this=0x7fd21d591010, directive=0x7fd22c229f20) at /var/jdk/src/hotspot/share/code/nmethod.cpp:935
    #4 0x00007fd2341a2a9e in CompileBroker::invoke_compiler_on_method (task=0x7fd22c359c10) at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:2345
    #5 0x00007fd2341a12c1 in CompileBroker::compiler_thread_loop () at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1966
    #6 0x00007fd2341c3462 in CompilerThread::thread_entry (thread=0x7fd22c344ac0, __the_thread__=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/compiler/compilerThread.cpp:59
    #7 0x00007fd234c9e009 in JavaThread::thread_main_inner (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1297
    #8 0x00007fd234c9de92 in JavaThread::run (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:1280
    #9 0x00007fd234c9b57f in Thread::call_run (this=0x7fd22c344ac0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:358
    #10 0x00007fd2349e21e7 in thread_native_entry (thread=0x7fd22c344ac0) at /var/jdk/src/hotspot/os/linux/os_linux.cpp:705
    #11 0x00007fd235b9eb43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #12 0x00007fd235c2fbb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
    + +

Resolving the compile directive

    +
    (gdb) bt
    #0 DirectivesStack::getMatchingDirective (method=..., comp=0x7f4b2c2bef40) at /var/jdk/src/hotspot/share/compiler/compilerDirectives.cpp:670
    #1 0x00007f4b34793667 in CompileBroker::compile_method (method=..., osr_bci=-1, comp_level=3, hot_method=..., hot_count=0, compile_reason=CompileTask::Reason_MustBeCompiled, __the_thread__=0x7f4b2c02a5c0)
    at /var/jdk/src/hotspot/share/compiler/compileBroker.cpp:1349
    #2 0x00007f4b34770655 in CompilationPolicy::compile_if_required (m=..., __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/compiler/compilationPolicy.cpp:110
    #3 0x00007f4b34ae8f97 in JavaCalls::call_helper (result=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/javaCalls.cpp:359
    #4 0x00007f4b34fe0344 in os::os_exception_wrapper (f=0x7f4b34ae8ccc <JavaCalls::call_helper(JavaValue*, methodHandle const&, JavaCallArguments*, JavaThread*)>, value=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, thread=0x7f4b2c02a5c0)
    at /var/jdk/src/hotspot/os/linux/os_linux.cpp:4794
    #5 0x00007f4b34ae8cc9 in JavaCalls::call (result=0x7f4b33e21750, method=..., args=0x7f4b33e217a0, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/javaCalls.cpp:330
    #6 0x00007f4b34ab6626 in InstanceKlass::call_class_initializer (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1519
    #7 0x00007f4b34ab50aa in InstanceKlass::initialize_impl (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1177
    #8 0x00007f4b34ab3adc in InstanceKlass::initialize (this=0x80004c5e8, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:796
    #9 0x00007f4b352905de in initialize_class (class_name=0x7f4b3114d470, __the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:689
    #10 0x00007f4b35296d82 in Threads::initialize_jsr292_core_classes (__the_thread__=0x7f4b2c02a5c0) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2687
    #11 0x00007f4b352975e0 in Threads::create_vm (args=0x7f4b33e21d50, canTryAgain=0x7f4b33e21c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2987
    #12 0x00007f4b34bbc43b in JNI_CreateJavaVM_inner (vm=0x7f4b33e21da8, penv=0x7f4b33e21db0, args=0x7f4b33e21d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
    #13 0x00007f4b34bbc787 in JNI_CreateJavaVM (vm=0x7f4b33e21da8, penv=0x7f4b33e21db0, args=0x7f4b33e21d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
    #14 0x00007f4b36333a6a in InitializeJVM (pvm=0x7f4b33e21da8, penv=0x7f4b33e21db0, ifn=0x7f4b33e21e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
    #15 0x00007f4b363305ec in JavaMain (_args=0x7fff1f7dd300) at /var/jdk/src/java.base/share/native/libjli/java.c:411
    #16 0x00007f4b363375ec in ThreadJavaMain (args=0x7fff1f7dd300) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #17 0x00007f4b36192b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #18 0x00007f4b36223bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100

Output

The output is as follows:


    ------------------------ OptoAssembly for Compile_id = 26 -----------------------
    #
    # void ( )
    #
    # -- Old rsp -- Framesize: 32 --
    #r591 rsp+28: in_preserve
    #r590 rsp+24: return address
    #r589 rsp+20: in_preserve
    #r588 rsp+16: saved fp register
    #r587 rsp+12: pad2, stack alignment
    #r586 rsp+ 8: pad2, stack alignment
    #r585 rsp+ 4: Fixed slot 1
    #r584 rsp+ 0: Fixed slot 0
    #
    000 N1: # out( B1 ) <- in( B1 ) Freq: 1

    000 B1: # out( N1 ) <- BLOCK HEAD IS JUNK Freq: 1
    000 # stack bang (96 bytes)
    pushq rbp # Save rbp
    subq rsp, #16 # Create frame

    00c movq R10, java/lang/Class:exact * # ptr
    016 movl R8, [R10 + #112 (8-bit)] # int ! Field: volatile com/Hello.i
    01a MEMBAR-acquire ! (empty encoding)
    01a MEMBAR-release ! (empty encoding)
    01a incl R8 # int
    01d movl [R10 + #112 (8-bit)], R8 # int ! Field: volatile com/Hello.i
    021 lock addl [rsp + #0], 0 ! membar_volatile
    027 addq rsp, 16 # Destroy frame
    popq rbp
    cmpq rsp, poll_offset[r15_thread]
    ja #safepoint_stub # Safepoint: poll for GC

    039 ret
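
For reference, a minimal Java class of the kind that would produce an nmethod like the one above is sketched below. This is my own reconstruction (the class name com.Hello and the volatile static field i are taken from the 'Field: volatile com/Hello.i' lines in the OptoAssembly); the volatile increment is what forces the MEMBAR and lock addl membar_volatile sequence.

package com;

public class Hello {
    static volatile int i;              // volatile static field, matching 'volatile com/Hello.i'

    static void bump() {
        i++;                            // volatile read, increment, volatile write
    }

    public static void main(String[] args) {
        for (int k = 0; k < 1_000_000; k++) {
            bump();                     // loop enough to trigger JIT compilation of bump()
        }
    }
}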

Related function:

// the JDK function that prints the assembly above
    PhaseOutput::dump_asm_on


Background

    returned error: Code: 999, e.displayText() = DB::Exception: Cannot allocate block number in ZooKeeper: Coordination::Exception: Connection loss, path: xxx


Scenario

Recently ClickHouse has occasionally been losing its ZooKeeper connection.


Background

While compiling C++ I needed to understand the compilation process.

Adding C++ flags in CMake

When we compile a program with g++, there are often warnings or errors. For example:
    error: unused variable 'productSize' [-Werror,-Wunused-variable]

You can see -Werror,-Wunused-variable, which means this flag is what turned the warning into an error; the actual issue is that I defined a variable but never used it.

If we want to turn it off, we can add -Wno-unused-variable; in other words, take the -Wxxx reported in the error and change it to -Wno-xxx.

We can add the corresponding compile flags via add_compile_options:
    add_compile_options(-Wall -Wextra -pedantic -Werror  -Wno-unused-variable)

Background

I wanted to understand how the JDK's implementation differs from PHP's.

Stack trace

The JDK turns opcodes into the corresponding assembly code; the call stack of the code that emits the assembly is shown below:

    (gdb) where
    #0 AbstractAssembler::emit_int8 (this=0x7ffff00198a0, x=-64 '\300') at /home/ubuntu/jdk/src/hotspot/share/asm/assembler.hpp:286
    #1 0x00007ffff62513e9 in Assembler::emit_arith (this=0x7ffff00198a0, op1=133, op2=192, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/assembler_x86.cpp:300
    #2 0x00007ffff6284cf3 in Assembler::testq (this=0x7ffff00198a0, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/assembler_x86.cpp:9191
    #3 0x00007ffff6bcd75b in MacroAssembler::testptr (this=0x7ffff00198a0, dst=0x0, src=0x0) at /home/ubuntu/jdk/src/hotspot/cpu/x86/macroAssembler_x86.cpp:4072
    #4 0x00007ffff6f5b67f in StubGenerator::generate_forward_exception (this=0x7ffff5b68890) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:537
    #5 0x00007ffff6f7dd2a in StubGenerator::generate_initial (this=0x7ffff5b68890) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:5752
    #6 0x00007ffff6f7e9db in StubGenerator::StubGenerator (this=0x7ffff5b68890, code=0x7ffff5b68940, all=false)
    at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:5994
    #7 0x00007ffff6f589d3 in StubGenerator_generate (code=0x7ffff5b68940, all=false) at /home/ubuntu/jdk/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp:6000
    #8 0x00007ffff6f7eae9 in StubRoutines::initialize1 () at /home/ubuntu/jdk/src/hotspot/share/runtime/stubRoutines.cpp:195
    #9 0x00007ffff6f7fb77 in stubRoutines_init1 () at /home/ubuntu/jdk/src/hotspot/share/runtime/stubRoutines.cpp:374
    #10 0x00007ffff686a610 in init_globals () at /home/ubuntu/jdk/src/hotspot/share/runtime/init.cpp:112
    #11 0x00007ffff700e2fd in Threads::create_vm (args=0x7ffff5b68e20, canTryAgain=0x7ffff5b68d2b) at /home/ubuntu/jdk/src/hotspot/share/runtime/thread.cpp:3729
    #12 0x00007ffff697a82d in JNI_CreateJavaVM_inner (vm=0x7ffff5b68e78, penv=0x7ffff5b68e80, args=0x7ffff5b68e20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:3935
    #13 0x00007ffff697ab47 in JNI_CreateJavaVM (vm=0x7ffff5b68e78, penv=0x7ffff5b68e80, args=0x7ffff5b68e20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:4021
    #14 0x00007ffff7fba8a2 in InitializeJVM (pvm=0x7ffff5b68e78, penv=0x7ffff5b68e80, ifn=0x7ffff5b68ed0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:1529
    #15 0x00007ffff7fb7453 in JavaMain (_args=0x7ffffffface0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:414
    #16 0x00007ffff7d79609 in start_thread (arg=<optimized out>) at pthread_create.c:477
    #17 0x00007ffff7ed5163 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95

Step 1

Pull the source code

    git clone https://github.com/openjdk/jdk.git  

Step 2

Local version:


My local JDK version is 17:

    java -version
    openjdk version "17.0.4" 2022-07-19

A JDK release can only be bootstrapped by the previous release, so you have to check out the next major version relative to your boot JDK. My local JDK is 17, so I check out jdk 18:

## switch to jdk 18
    git checkout jdk-18+37

## generate the makefiles

    ./configure --with-debug-level=slowdebug --with-extra-cflags="-Wno-nonnull -Wno-maybe-uninitialized -Wno-free-nonheap-object"
## build with 2 parallel jobs

    make JOBS=2 CONF=linux-x86_64-server-slowdebug

Common errors

configure: error: Could not find all X11 headers (shape.h Xrender.h Xrandr.h XTest.h Intrinsic.h). You might be able to fix this by running 'sudo apt-get install libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev'.
    configure exiting with result code 1

Solution:

    sudo apt-get install libx11-dev libxext-dev libxrender-dev libxrandr-dev libxtst-dev libxt-dev
    configure: error: Could not find cups! You might be able to fix this by running 'sudo apt-get install libcups2-dev'. 
    configure exiting with result code 1

Solution:

    sudo apt-get install libcups2-dev

Background

Spring Boot uses Java's thread-local map in quite a few places to isolate variables per thread. I wanted to understand how Java implements this.


Implementation

The core idea is that whenever a thread object is created, its thread-local map is set up during the thread's initialization; each thread gets its own map, which is what provides per-thread isolation.


The relevant file in the JDK source is:
src/java.base/share/classes/java/lang/Thread.java

    /**
    * Initializes a virtual Thread.
    *
    * @param name thread name, can be null
    * @param characteristics thread characteristics
    * @param bound true when bound to an OS thread
    */
    Thread(String name, int characteristics, boolean bound) {
    ...
    this.inheritableThreadLocals = ThreadLocal.createInheritedMap(parentMap);
    ...
    }


ThreadLocal initialization

ThreadLocal has several entry points, but they all end up calling createMap to initialize the ThreadLocalMap; the map itself is held by the Thread instance.


The key field is threadLocals: each thread holds its own map, and the map's entries are weak references.

    /* ThreadLocal values pertaining to this thread. This map is maintained
    * by the ThreadLocal class. */
    ThreadLocal.ThreadLocalMap threadLocals = null;
    /**
    * Sets the current thread's copy of this thread-local variable
    * to the specified value. Most subclasses will have no need to
    * override this method, relying solely on the {@link #initialValue}
    * method to set the values of thread-locals.
    *
    * @param value the value to be stored in the current thread's copy of
    * this thread-local.
    */
    public void set(T value) {
    Thread t = Thread.currentThread();
    ThreadLocalMap map = getMap(t);
    if (map != null) {
    map.set(this, value);
    } else {
createMap(t, value); // initialize the map on first use
    }
    }
    /**
    * Variant of set() to establish initialValue. Used instead
    * of set() in case user has overridden the set() method.
    *
    * @return the initial value
    */
    private T setInitialValue() {
    T value = initialValue();
    Thread t = Thread.currentThread();
    ThreadLocalMap map = getMap(t);
    if (map != null) {
    map.set(this, value);
    } else {
createMap(t, value); // initialize the map on first use
}
if (this instanceof TerminatingThreadLocal) {
TerminatingThreadLocal.register((TerminatingThreadLocal<?>) this);
}
return value;
}

Each Thread holds its own threadLocals object:

    /**
    * Get the map associated with a ThreadLocal. Overridden in
    * InheritableThreadLocal.
    *
    * @param t the current thread
    * @return the map
    */
    ThreadLocalMap getMap(Thread t) {
    return t.threadLocals;
    }
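
A minimal usage example (my own snippet, not from the JDK source) showing the per-thread isolation this map provides: each thread only sees the value it stored through the same ThreadLocal instance.

public class ThreadLocalDemo {
    // one ThreadLocal shared by all threads; the values live in each thread's own ThreadLocalMap
    private static final ThreadLocal<String> NAME = ThreadLocal.withInitial(() -> "unset");

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
            NAME.set(Thread.currentThread().getName()); // stored in the current thread's map
            System.out.println(Thread.currentThread().getName() + " sees " + NAME.get());
        };
        Thread t1 = new Thread(task, "t1");
        Thread t2 = new Thread(task, "t2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        System.out.println("main sees " + NAME.get()); // prints "unset": main never called set()
    }
}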

Background

Autowired beans are singletons by default, so how does an autowired HttpServletRequest manage to not behave like a singleton?


Stack trace:

    getRequestAttributes:105, RequestContextHolder (org.springframework.web.context.request)
    processRequest:997, FrameworkServlet (org.springframework.web.servlet)
    doPost:909, FrameworkServlet (org.springframework.web.servlet)
    service:652, HttpServlet (javax.servlet.http)
    service:883, FrameworkServlet (org.springframework.web.servlet)
    service:733, HttpServlet (javax.servlet.http)
    internalDoFilter:227, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:53, WsFilter (org.apache.tomcat.websocket.server)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:100, RequestContextFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:93, FormContentFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:97, WebMvcMetricsFilter (org.springframework.boot.actuate.metrics.web.servlet)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    doFilterInternal:201, CharacterEncodingFilter (org.springframework.web.filter)
    doFilter:119, OncePerRequestFilter (org.springframework.web.filter)
    internalDoFilter:189, ApplicationFilterChain (org.apache.catalina.core)
    doFilter:162, ApplicationFilterChain (org.apache.catalina.core)
    invoke:202, StandardWrapperValve (org.apache.catalina.core)
    invoke:97, StandardContextValve (org.apache.catalina.core)
    invoke:542, AuthenticatorBase (org.apache.catalina.authenticator)
    invoke:143, StandardHostValve (org.apache.catalina.core)
    invoke:92, ErrorReportValve (org.apache.catalina.valves)
    invoke:78, StandardEngineValve (org.apache.catalina.core)
    service:357, CoyoteAdapter (org.apache.catalina.connector)
    service:374, Http11Processor (org.apache.coyote.http11)
    process:65, AbstractProcessorLight (org.apache.coyote)
    process:893, AbstractProtocol$ConnectionHandler (org.apache.coyote)
    doRun:1707, NioEndpoint$SocketProcessor (org.apache.tomcat.util.net)
    run:49, SocketProcessorBase (org.apache.tomcat.util.net)
    runWorker:1128, ThreadPoolExecutor (java.util.concurrent)
    run:628, ThreadPoolExecutor$Worker (java.util.concurrent)
    run:61, TaskThread$WrappingRunnable (org.apache.tomcat.util.threads)
    run:834, Thread (java.lang)
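
What makes this work is that the injected HttpServletRequest is a proxy, not the request itself: every call on it is resolved against the RequestAttributes that FrameworkServlet/RequestContextFilter bind to the current thread via RequestContextHolder (a ThreadLocal), as the stack above shows. A rough sketch of that per-call resolution, under my own naming (currentRequest is a hypothetical helper, not a Spring method):

import javax.servlet.http.HttpServletRequest;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;

public final class CurrentRequest {
    // Hypothetical helper: resolve the request bound to the current thread,
    // roughly what the proxy behind an autowired HttpServletRequest does on every call.
    static HttpServletRequest currentRequest() {
        ServletRequestAttributes attrs =
                (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
        if (attrs == null) {
            throw new IllegalStateException("no request bound to this thread");
        }
        return attrs.getRequest();
    }
}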

Background

After Lucene has run the scorer and computed a score for each hit, it still needs to take the top K results, so a top-K algorithm is required. It is usually implemented with a priority queue.


Introduction

Everything below describes a max-priority queue.


There are two kinds of priority queues, min-priority and max-priority; the only difference is the direction of the ordering.


Let's first look at its properties:


Composition: a priority queue is a set S of items, where each item has two parts: an element and a key.


Operations

• insert(S, item)
• maximum(S)
• extract_max(S)
• increase_key(S, element, key): raise the key of an element already in the queue to the given key

A minimal Java sketch of these operations follows below.
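
This is my own illustration, not Lucene's implementation: an array-backed max-heap with 1-based indexing, so that left = 2 * index and right = 2 * index + 1 as proved in the next section.

import java.util.ArrayList;
import java.util.List;

class MaxHeap {
    private final List<Integer> a = new ArrayList<>(List.of(0)); // slot 0 is a dummy, data starts at index 1

    void insert(int key) {
        a.add(key);
        int i = a.size() - 1;
        while (i > 1 && a.get(i / 2) < a.get(i)) { // sift up while larger than the parent
            swap(i, i / 2);
            i = i / 2;
        }
    }

    int maximum() { return a.get(1); }             // peek at the largest key

    int extractMax() {
        int max = a.get(1);
        a.set(1, a.get(a.size() - 1));             // move the last element to the root
        a.remove(a.size() - 1);
        siftDown(1);                               // restore the heap property
        return max;
    }

    private void siftDown(int i) {
        while (true) {
            int l = 2 * i, r = 2 * i + 1, largest = i;
            if (l < a.size() && a.get(l) > a.get(largest)) largest = l;
            if (r < a.size() && a.get(r) > a.get(largest)) largest = r;
            if (largest == i) return;
            swap(i, largest);
            i = largest;
        }
    }

    private void swap(int i, int j) {
        int tmp = a.get(i);
        a.set(i, a.get(j));
        a.set(j, tmp);
    }
}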

Proof

For a non-empty complete binary tree whose nodes are numbered starting at index = 1, every node index satisfies:

• its left child is left = index * 2
• its right child is right = index * 2 + 1

Proof by induction.
Base case:
when index = 1, left = 2, which satisfies left = index * 2;
when index = 1, right = 3, which satisfies right = index * 2 + 1.


Inductive step:
consider element n + 1. If it is a left child, then the node just before it satisfies n = pre_parent * 2 + 1.
For element n + 1 we then have n + 1 = (pre_parent * 2 + 1) + 1 = (pre_parent + 1) * 2,
and pre_parent + 1 is exactly its parent, so the recurrence holds.


The right-child case is analogous.


This completes the proof.


References

Introduction to Algorithms (CLRS)


Background

Understanding Java method references.


Introduction

java method reference
Section 15.13, Method Reference Expressions, of the document above describes them.


A Java method reference is an expression; evaluating the expression produces an object, and the result is an instance of a functional interface type.

    Evaluation of a method reference expression produces an instance of a functional
    interface type
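
A small example of that evaluation (my own snippet): the method reference Integer::parseInt is assigned to a functional interface type, and invoking the interface method calls the referenced method.

import java.util.function.Function;

public class MethodRefDemo {
    public static void main(String[] args) {
        // evaluating the method reference produces an instance of the functional interface type
        Function<String, Integer> parse = Integer::parseInt;
        System.out.println(parse.apply("42") + 1); // prints 43
    }
}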

Background

Notes on some simple pieces of Java syntax.


static blocks

    static block

    8.7 Static Initializers
    A static initializer declared in a class is executed when the class is initialized
    (§12.4.2). Together with any field initializers for class variables (§8.3.2), static
    initializers may be used to initialize the class variables of the class.


A static block is invoked after the class is loaded, when the class is initialized, and before any instance of the class is created.
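
A small example (mine) showing that order: the static block runs exactly once, when the class is initialized, before the first instance is constructed.

public class StaticBlockDemo {
    static int value;

    static {
        value = 42;
        System.out.println("static initializer: runs once at class initialization");
    }

    StaticBlockDemo() {
        System.out.println("constructor: runs per instance, value = " + value);
    }

    public static void main(String[] args) {
        new StaticBlockDemo(); // the static block's output appears before this constructor's output
        new StaticBlockDemo(); // the static block does not run again
    }
}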


Background

The .tim file is where Lucene stores term-related statistics. The .tip file goes together with it.


Format and example

File format:

The corresponding documentation can be found via the further-reading links at the bottom.

    TermsDict (.tim) --> Header, PostingsHeader, NodeBlockNumBlocks, Footer
    NodeBlock --> (OuterNode | InnerNode)
    OuterNode --> EntryCount, SuffixLength, ByteSuffixLength, StatsLength, < TermStats >EntryCount, MetaLength, <TermMetadata>EntryCount
    InnerNode --> EntryCount, SuffixLength[,Sub?], ByteSuffixLength, StatsLength, < TermStats ? >EntryCount, MetaLength, <TermMetadata ? >EntryCount
    TermStats --> DocFreq, TotalTermFreq
    Header --> CodecHeader
    EntryCount,SuffixLength,StatsLength,DocFreq,MetaLength --> VInt
    TotalTermFreq --> VLong
    Footer --> CodecFooter
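
Most of the integer fields above (EntryCount, SuffixLength, StatsLength, DocFreq, MetaLength) are VInt-encoded: the low 7 bits of each byte carry payload and the high bit marks that another byte follows. A small decoder sketch of that scheme (my own code, not Lucene's DataInput):

// Decode a Lucene-style VInt starting at data[pos]: 7 payload bits per byte,
// least-significant group first, a set high bit means "more bytes follow".
static int readVInt(byte[] data, int pos) {
    int value = 0;
    int shift = 0;
    while (true) {
        byte b = data[pos++];
        value |= (b & 0x7F) << shift;
        if ((b & 0x80) == 0) {
            return value;       // single-byte values (high bit clear) decode to themselves
        }
        shift += 7;
    }
}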

Example

    hexdump -C  _j_Lucene90_0.tim 

    00000000 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
    00000010 72 6d 73 44 69 63 74 00 00 00 00 fe ea 80 e6 45 |rmsDict........E|
    00000020 20 d8 56 64 1b 1b 1b 89 70 fe 67 0a 4c 75 63 65 | .Vd....p.g.Luce|
    00000030 6e 65 39 30 5f 30 25 bc 03 61 6d 61 6e 64 62 75 |ne90_0%..amandbu|
    00000040 74 63 61 6e 64 6f 68 65 6c 6c 6f 68 69 69 69 73 |tcandohellohiiis|
    00000050 69 74 6b 6e 6f 77 6d 61 79 6d 6f 6e 67 6f 6e 6f |itknowmaymongono|
    00000060 74 74 72 79 77 68 61 74 77 6f 72 6c 64 79 6f 75 |ttrywhatworldyou|
    00000070 24 02 03 03 03 02 05 02 01 02 02 04 03 05 03 03 |$...............|
    00000080 04 05 03 10 04 00 09 02 01 04 00 03 02 01 01 02 |................|
00000090 01 07 02 02 26 7a 3d 04 01 02 03 01 01 01 01 01 |....&z=.........| <--- the 6th byte of this row, i.e. the data starting with 7a
    000000a0 05 01 01 01 00 02 04 00 02 01 01 01 01 01 02 01 |................|
    000000b0 01 01 02 01 01 01 01 05 01 03 01 05 a4 03 2f 68 |............../h|
    000000c0 6f 6d 65 2f 75 62 75 6e 74 75 2f 64 6f 63 2f 68 |ome/ubuntu/doc/h|
    000000d0 65 6c 6c 6f 2e 74 78 74 2f 68 6f 6d 65 2f 75 62 |ello.txt/home/ub|
    000000e0 75 6e 74 75 2f 64 6f 63 2f 6d 6f 6e 67 6f 2e 74 |untu/doc/mongo.t|
    000000f0 78 74 05 1a 01 03 04 82 01 01 03 c0 28 93 e8 00 |xt..........(...|
    00000100 00 00 00 00 00 00 00 da 02 a3 a3 |...........|

The ste.in here is the data of the .tim file:

    main[2] list
    472 }
    473 }
    474
    475 // metadata
    476 => ste.fr.parent.postingsReader.decodeTerm(bytesReader, ste.fr.fieldInfo, state, absolute);
    477
    478 metaDataUpto++;
    479 absolute = false;
    480 }
    481 state.termBlockOrd = metaDataUpto;
    main[2] print ste.in
    ste.in = "MMapIndexInput(path="/home/ubuntu/index/_j_Lucene90_0.tim")"


The corresponding bytes are (note that 122 = 0x7a, matching the hexdump above):

    main[2] dump bytesReader.bytes
    bytesReader.bytes = {
    122, 61, 4, 1, 2, 3, 1, 1, 1, 1, 1, 5, 1, 1, 1, 0, 2, 4, 0, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 5, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    }

Further reading

- [Format documentation](https://lucene.apache.org/core/9_0_0/core/org/apache/lucene/codecs/lucene90/blocktree/Lucene90BlockTreeTermsWriter.html)
- [tim format](https://www.jianshu.com/p/b05eed0da6ad)

Background

A collection of relevant classic papers.


Databases

    mvcc

A highly recommended PDF on database transactions.


Execution engine


Efficient Query Evaluation using a Two-Level Retrieval Process


    topk min/max heap


    parser

    lr(k)


    gc

Mostly Concurrent Garbage Collection
Garbage-First Garbage Collection
A Fast Write Barrier for Generational Garbage Collectors
Incremental Collection of Mature Objects


    database

    The Vertica Analytic Database: C-Store 7 Years Later


ClickHouse stack trace

    (lldb) bt
    * thread #3, name = 'HTTPHandler', stop reason = breakpoint 1.1
    * frame #0: 0x000000001d9c6522 clickhouse-server`DB::tryParseQuery(parser=0x00007fff2d3ef620, _out_query_end=0x00007fff2d3ecc60, all_queries_end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", out_error_message="", hilite=false, query_description="", allow_multi_statements=false, max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:237:32
    frame #1: 0x000000001d9c77bd clickhouse-server`DB::parseQueryAndMovePosition(parser=0x00007fff2d3ef620, pos=0x00007fff2d3ecc60, end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", query_description="", allow_multi_statements=false, max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:343:18
    frame #2: 0x000000001d9c7926 clickhouse-server`DB::parseQuery(parser=0x00007fff2d3ef620, begin="show databases\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", query_description="", max_query_size=262144, max_parser_depth=1000) at parseQuery.cpp:360:12
    frame #3: 0x000000001b95ec13 clickhouse-server`DB::executeQueryImpl(begin="show databases\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", end="\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5\xa5ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZtext/plain; charset=UTF-8", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007ffff5b23000 strong=4 weak=5, internal=false, stage=Complete, istr=0x00007fff2403d000) at executeQuery.cpp:442:15
    frame #4: 0x000000001b965181 clickhouse-server`DB::executeQuery(istr=0x00007fff2403d000, ostr=0x00007fff240393d8, allow_into_outfile=false, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007ffff5b23000 strong=4 weak=5, set_result_details=DB::SetResultDetailsFunc @ 0x00007fff2d3f0bf0, output_format_settings= Has Value=false )>, std::__1::optional<DB::FormatSettings> const&) at executeQuery.cpp:1136:30
    frame #5: 0x000000001d3b7b17 clickhouse-server`DB::HTTPHandler::processQuery(this=0x00007ffff7154d40, request=0x00007fff2d3f2438, params=0x00007fff2d3f1e10, response=0x00007fff2d3f24f0, used_output=0x00007fff2d3f1ec8, query_scope= Has Value=true ) at HTTPHandler.cpp:822:5
    frame #6: 0x000000001d3ba4fe clickhouse-server`DB::HTTPHandler::handleRequest(this=0x00007ffff7154d40, request=0x00007fff2d3f2438, response=0x00007fff2d3f24f0) at HTTPHandler.cpp:960:9
    frame #7: 0x000000001d3f1e17 clickhouse-server`DB::HTTPServerConnection::run(this=0x00007ffff5b18000) at HTTPServerConnection.cpp:65:34
    frame #8: 0x000000002308e1d9 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007ffff5b18000) at TCPServerConnection.cpp:43:3
    frame #9: 0x000000002308e9e6 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff12875500) at TCPServerDispatcher.cpp:115:20
    frame #10: 0x00000000232cecf4 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702df80) at ThreadPool.cpp:199:14
    frame #11: 0x00000000232cb81a clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001330) at Thread.cpp:55:11
    frame #12: 0x00000000232ca5fe clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702dfb8) at Thread_POSIX.cpp:345:27
    frame #13: 0x00007ffff7df8b43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
    frame #14: 0x00007ffff7e8aa00 libc.so.6`__clone3 at clone3.S:81


Background

The Lucene on my home machine is version 10.


Creating and saving the index

### run with the JDWP debug agent
    java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp /home/dai/lucene/lucene/demo/build/libs/lucene-demo-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/core/build/libs/lucene-core-10.0.0-SNAPSHOT.jar org.apache.lucene.demo.IndexFiles -docs /home/dai/docs
### attach with jdb
    jdb -attach 8000 -sourcepath /home/dai/lucene/lucene/demo/src/java/:/home/dai/lucene/lucene/core/src/java/

Tokenization

Both the inverted index and the tokenization are built in this code path:

    main[1] where
    [1] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,140)
    [2] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
    [3] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [4] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [5] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [6] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [7] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [8] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
    [9] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
    [10] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [12] java.nio.file.Files.walkFileTree (Files.java:2,811)
    [13] java.nio.file.Files.walkFileTree (Files.java:2,882)
    [14] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [15] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

    Step completed: "thread=main", org.apache.lucene.index.TermsHashPerField.add(), line=193 bci=22
    193 int termID = bytesHash.add(termBytes);

    main[1] print termBytes
    termBytes = "[2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 2f 62 62 62 2e 74 78 74]"
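
Those bytes are simply the UTF-8 text of the indexed file path; a quick check (my own snippet) decodes them back:

import java.nio.charset.StandardCharsets;

public class DecodeTermBytes {
    public static void main(String[] args) {
        // the bytes printed by jdb above
        byte[] termBytes = {
            0x2f, 0x68, 0x6f, 0x6d, 0x65, 0x2f, 0x64, 0x61, 0x69, 0x2f,
            0x64, 0x6f, 0x63, 0x73, 0x2f, 0x62, 0x62, 0x62, 0x2e, 0x74, 0x78, 0x74
        };
        System.out.println(new String(termBytes, StandardCharsets.UTF_8)); // /home/dai/docs/bbb.txt
    }
}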


    invert

An inverted index is, at its core, a term => doc mapping. The central class is lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java:

    @Override
    void addTerm(final int termID, final int docID) {
    final FreqProxPostingsArray postings = freqProxPostingsArray;
    assert !hasFreq || postings.termFreqs[termID] > 0;

    if (!hasFreq) {
    assert postings.termFreqs == null;
    if (termFreqAtt.getTermFrequency() != 1) {
    throw new IllegalStateException(
    "field \""
    + getFieldName()
    + "\": must index term freq while using custom TermFrequencyAttribute");
    }
    if (docID != postings.lastDocIDs[termID]) {
    // New document; now encode docCode for previous doc:
    assert docID > postings.lastDocIDs[termID];
    writeVInt(0, postings.lastDocCodes[termID]);
    postings.lastDocCodes[termID] = docID - postings.lastDocIDs[termID];
    postings.lastDocIDs[termID] = docID;
    fieldState.uniqueTermCount++;
    }
    } else if (docID != postings.lastDocIDs[termID]) {
    assert docID > postings.lastDocIDs[termID]
    : "id: " + docID + " postings ID: " + postings.lastDocIDs[termID] + " termID: " + termID;
    // Term not yet seen in the current doc but previously
    // seen in other doc(s) since the last flush

    // Now that we know doc freq for previous doc,
    // write it & lastDocCode
    if (1 == postings.termFreqs[termID]) {
    writeVInt(0, postings.lastDocCodes[termID] | 1);
    } else {
    writeVInt(0, postings.lastDocCodes[termID]);
    writeVInt(0, postings.termFreqs[termID]);
    }

    // Init freq for the current document
    postings.termFreqs[termID] = getTermFreq();
    fieldState.maxTermFrequency =
    Math.max(postings.termFreqs[termID], fieldState.maxTermFrequency);
    postings.lastDocCodes[termID] = (docID - postings.lastDocIDs[termID]) << 1;
    postings.lastDocIDs[termID] = docID;
    if (hasProx) {
    writeProx(termID, fieldState.position);
    if (hasOffsets) {
    postings.lastOffsets[termID] = 0;
    writeOffsets(termID, fieldState.offset);
    }
    } else {
    assert !hasOffsets;
    }
    fieldState.uniqueTermCount++;
    } else {
    postings.termFreqs[termID] = Math.addExact(postings.termFreqs[termID], getTermFreq());
    fieldState.maxTermFrequency =
    Math.max(fieldState.maxTermFrequency, postings.termFreqs[termID]);
    if (hasProx) {
    writeProx(termID, fieldState.position - postings.lastPositions[termID]);
    if (hasOffsets) {
    writeOffsets(termID, fieldState.offset);
    }
    }
    }
    }

Generating the termID

Stack trace

    main[1] where
    [1] org.apache.lucene.index.TermsHashPerField.initStreamSlices (TermsHashPerField.java:150)
    [2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:198)
    [3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
    [4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
    [5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [10] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
    [11] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
    [12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [13] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [14] java.nio.file.Files.walkFileTree (Files.java:2,811)
    [15] java.nio.file.Files.walkFileTree (Files.java:2,882)
    [16] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [17] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)

    IntBlockPool intPool,
    ByteBlockPool bytePool,
    ByteBlockPool termBytePool,

Let's first look at the intPool field: it maintains a two-dimensional array int buffers[][] plus three offsets, and it records offsets into bytePool.

public final class IntBlockPool {
...

// Core structure: this 2-D array stores offsets into bytePool; the initial capacity is 10 and it grows automatically
public int[][] buffers = new int[10][];

// Index into the outer array, used together with buffers, e.g. buffers[bufferUpto + offset]
private int bufferUpto = -1;
// The inner array currently being written to, e.g. buffer = buffers[1];
public int[] buffer;
// intUpto is the offset within the current inner array
public int intUpto = INT_BLOCK_SIZE;
// Absolute offset relative to the whole 2-D pool, a bit like relative vs. absolute jumps
public int intOffset = -INT_BLOCK_SIZE;
}

Like intPool, bytePool and termBytePool are also described by a two-dimensional array plus a few offset fields:

public final class ByteBlockPool implements Accountable {
...
// Core structure: a 2-D byte array
public byte[][] buffers = new byte[10][];

/** index into the buffers array pointing to the current buffer used as the head */
private int bufferUpto = -1; // Which buffer we are upto
/** Where we are in head buffer */
public int byteUpto = BYTE_BLOCK_SIZE;

/** Current head buffer */
public byte[] buffer;
/** Current head offset */
public int byteOffset = -BYTE_BLOCK_SIZE;
}
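
In other words, a logical position in the pool splits into (which inner buffer, offset inside that buffer). A simplified sketch of that arithmetic (mine; BYTE_BLOCK_SIZE is assumed to be 1 << 15 = 32768, matching Lucene's default):

class PoolOffsets {
    static final int BYTE_BLOCK_SIZE = 1 << 15; // assumed block size

    static int bufferIndex(int globalOffset) {
        return globalOffset / BYTE_BLOCK_SIZE;  // which inner byte[] the offset falls into
    }

    static int offsetInBuffer(int globalOffset) {
        return globalOffset % BYTE_BLOCK_SIZE;  // position inside that inner array
    }

    // The pool keeps the inverse as running state: byteOffset is where the current
    // buffer starts, byteUpto is the position inside it, so the absolute position
    // of the write head is byteOffset + byteUpto.
    static int globalOffset(int byteOffset, int byteUpto) {
        return byteOffset + byteUpto;
    }
}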

Querying and searching

Breakpoints

## run with the debug agent
    java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp /home/dai/lucene/lucene/demo/build/libs/lucene-demo-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/core/build/libs/lucene-core-10.0.0-SNAPSHOT.jar:/home/dai/lucene/lucene/queryparser/build/libs/lucene-queryparser-10.0.0-SNAPSHOT.jar org.apache.lucene.demo.SearchFiles

## attach with jdb
    jdb -attach 8000 -sourcepath /home/dai/lucene/lucene/demo/src/java/:/home/dai/lucene/lucene/core/src/java/

termState describes the term's statistics:

main[1] print termState
    termState = "TermStates
    state=docFreq=1 totalTermFreq=1 termBlockOrd=2 blockFP=0 docStartFP=63 posStartFP=63 payStartFP=0 lastPosBlockOffset=-1 singletonDocID=6
    "
    main[1] print term
    term = "contents:am"
    main[1] where
    [1] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:233)
    [2] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:894)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [4] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [6] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [7] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Scoring

The default similarity used for scoring is BM25Similarity.

    main[1] where
    [1] org.apache.lucene.search.similarities.BM25Similarity.scorer (BM25Similarity.java:200)
    [2] org.apache.lucene.search.TermQuery$TermWeight.<init> (TermQuery.java:75)
    [3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:233)
    [4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:894)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


The core search loop

    main[1] list
    763 // there is no doc of interest in this reader context
    764 // continue with the following leaf
    765 continue;
    766 }
    767 => BulkScorer scorer = weight.bulkScorer(ctx);
    768 if (scorer != null) {
    769 try {
    770 scorer.score(leafCollector, ctx.reader().getLiveDocs());
    771 } catch (
    772 @SuppressWarnings("unused")
    main[1] where
    [1] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [4] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [6] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [7] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Getting the reader

    Step completed: "thread=main", org.apache.lucene.index.LeafReaderContext.reader(), line=67 bci=0
    67 return reader;

    main[1] print reader
    reader = "_0(10.0.0):c7:[diagnostics={source=flush, os.arch=amd64, java.runtime.version=17.0.3+7-Ubuntu-0ubuntu0.22.04.1, os.version=5.15.0-33-generic, java.vendor=Private Build, os=Linux, timestamp=1656601918836, java.version=17.0.3, java.vm.version=17.0.3+7-Ubuntu-0ubuntu0.22.04.1, lucene.version=10.0.0}]:[attributes={Lucene90StoredFieldsFormat.mode=BEST_SPEED}] :id=c276i3vlaza4c6uumuxapfnvf"
    main[1] where
    [1] org.apache.lucene.index.LeafReaderContext.reader (LeafReaderContext.java:67)
    [2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [5] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


The reader object inside it:

    main[1] dump reader
    reader = {
    si: instance of org.apache.lucene.index.SegmentCommitInfo(id=1531)
    originalSi: instance of org.apache.lucene.index.SegmentCommitInfo(id=1532)
    metaData: instance of org.apache.lucene.index.LeafMetaData(id=1533)
    liveDocs: null
    hardLiveDocs: null
    numDocs: 7
    core: instance of org.apache.lucene.index.SegmentCoreReaders(id=1534)
    segDocValues: instance of org.apache.lucene.index.SegmentDocValues(id=1535)
    isNRT: false
    docValuesProducer: null
    fieldInfos: instance of org.apache.lucene.index.FieldInfos(id=1536)
    readerClosedListeners: instance of java.util.concurrent.CopyOnWriteArraySet(id=1537)
    readerCacheHelper: instance of org.apache.lucene.index.SegmentReader$1(id=1538)
    coreCacheHelper: instance of org.apache.lucene.index.SegmentReader$2(id=1539)
    $assertionsDisabled: true
    org.apache.lucene.index.LeafReader.readerContext: instance of org.apache.lucene.index.LeafReaderContext(id=1540)
    org.apache.lucene.index.LeafReader.$assertionsDisabled: true
    org.apache.lucene.index.IndexReader.closed: false
    org.apache.lucene.index.IndexReader.closedByChild: false
    org.apache.lucene.index.IndexReader.refCount: instance of java.util.concurrent.atomic.AtomicInteger(id=1541)
    org.apache.lucene.index.IndexReader.parentReaders: instance of java.util.Collections$SynchronizedSet(id=1542)
    }


Scoring:

    main[1] list
    222
    223 @Override
    224 public int score(LeafCollector collector, Bits acceptDocs, int min, int max)
    225 throws IOException {
    226 => collector.setScorer(scorer);
    227 DocIdSetIterator scorerIterator = twoPhase == null ? iterator : twoPhase.approximation();
    228 DocIdSetIterator competitiveIterator = collector.competitiveIterator();
    229 DocIdSetIterator filteredIterator;
    230 if (competitiveIterator == null) {
    231 filteredIterator = scorerIterator;
    main[1] where
    [1] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:226)
    [2] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Collecting the top hits

    private static class SimpleTopScoreDocCollector extends TopScoreDocCollector {

    ...

    @Override
    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
    ...
    return new ScorerLeafCollector() {
    ...
    @Override
    public void collect(int doc) throws IOException {
float score = scorer.score(); // <---- no docId needs to be passed to get the score, because the docId is available from the enclosing TopScoreDocCollector

    // This collector relies on the fact that scorers produce positive values:
    assert score >= 0; // NOTE: false for NaN

    totalHits++;
    hitsThresholdChecker.incrementHitCount();

    if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
    updateGlobalMinCompetitiveScore(scorer);
    }

    if (score <= pqTop.score) {
    if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
    // we just reached totalHitsThreshold, we can start setting the min
    // competitive score now
    updateMinCompetitiveScore(scorer);
    }
    // Since docs are returned in-order (i.e., increasing doc Id), a document
    // with equal score to pqTop.score cannot compete since HitQueue favors
    // documents with lower doc Ids. Therefore reject those docs too.
    return;
    }
    pqTop.doc = doc + docBase;
    pqTop.score = score;
    pqTop = pq.updateTop();
    updateMinCompetitiveScore(scorer);
    }
    };
    }
    main[1] print scorer
    scorer = "scorer(weight(contents:am))[org.apache.lucene.search.TermScorer@290dbf45]"
    main[1] dump scorer
    scorer = {
    postingsEnum: instance of org.apache.lucene.index.SlowImpactsEnum(id=1546)
    impactsEnum: instance of org.apache.lucene.index.SlowImpactsEnum(id=1546)
    iterator: instance of org.apache.lucene.search.ImpactsDISI(id=1547)
    docScorer: instance of org.apache.lucene.search.LeafSimScorer(id=1548)
    impactsDisi: instance of org.apache.lucene.search.ImpactsDISI(id=1547)
    $assertionsDisabled: true
    org.apache.lucene.search.Scorer.weight: instance of org.apache.lucene.search.TermQuery$TermWeight(id=1549)
    }
    main[1] where
[1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:76) <--- the doc_id is not passed into the scorer here because a callback provides it; there is also a pq (priority queue) holding the docs already sorted
    [2] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
    [3] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [4] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1]


Core scoring function

Computing the score

    main[1] list
    246 // float. And then monotonicity is preserved through composition via
    247 // x -> 1 + x and x -> 1 - 1/x.
    248 // Finally we expand weight * (1 - 1 / (1 + freq * 1/norm)) to
    249 // weight - weight / (1 + freq * 1/norm), which runs slightly faster.
    250 => float normInverse = cache[((byte) encodedNorm) & 0xFF];
    251 return weight - weight / (1f + freq * normInverse);
    252 }
    253
    254 @Override
    255 public Explanation explain(Explanation freq, long encodedNorm) {
    main[1] where
    [1] org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer.score (BM25Similarity.java:250)
    [2] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
    [3] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
    [4] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
    [5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
    [6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
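
The comment in the listing explains the rewrite the scorer uses: weight * (1 - 1 / (1 + freq * 1/norm)) is expanded to weight - weight / (1 + freq * normInverse). A tiny check with made-up numbers (illustrative only, not real index statistics):

public class Bm25RewriteCheck {
    public static void main(String[] args) {
        float weight = 2.2f;           // made-up term weight
        float freq = 3.0f;             // made-up term frequency
        float normInverse = 1.0f / 4;  // made-up 1/norm, as looked up from the cache

        float original = weight * (1f - 1f / (1f + freq * normInverse));
        float expanded = weight - weight / (1f + freq * normInverse);
        // both print roughly 0.9428571 (identical up to float rounding)
        System.out.println(original + " vs " + expanded);
    }
}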


The reduce step

    main[1] list
    60 * Populates the results array with the ScoreDoc instances. This can be overridden in case a
    61 * different ScoreDoc type should be returned.
    62 */
    63 protected void populateResults(ScoreDoc[] results, int howMany) {
    64 => for (int i = howMany - 1; i >= 0; i--) {
    65 results[i] = pq.pop();
    66 }
    67 }
    68
    69 /**
    main[1] where
    [1] org.apache.lucene.search.TopDocsCollector.populateResults (TopDocsCollector.java:64)
    [2] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:166)
    [3] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:98)
    [4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:526)
    [5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

A helper that extracts the top-K results


Stack trace:

    main[1] where
    [1] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:312)
    [2] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
    [3] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
    [4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

    /**
    * Auxiliary method used by the {@link #merge} impls. A sort value of null is used to indicate
    * that docs should be sorted by score.
    */
    private static TopDocs mergeAux(
    Sort sort, int start, int size, TopDocs[] shardHits, Comparator<ScoreDoc> tieBreaker) {

    final PriorityQueue<ShardRef> queue;
    if (sort == null) {
    queue = new ScoreMergeSortQueue(shardHits, tieBreaker);
    } else {
    queue = new MergeSortQueue(sort, shardHits, tieBreaker);
    }

    long totalHitCount = 0;
    TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO;
    int availHitCount = 0;
    for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {
    final TopDocs shard = shardHits[shardIDX];
    // totalHits can be non-zero even if no hits were
    // collected, when searchAfter was used:
    totalHitCount += shard.totalHits.value;
    // If any hit count is a lower bound then the merged
    // total hit count is a lower bound as well
    if (shard.totalHits.relation == TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO) {
    totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
    }
    if (shard.scoreDocs != null && shard.scoreDocs.length > 0) {
    availHitCount += shard.scoreDocs.length;
    queue.add(new ShardRef(shardIDX));
    }
    }

    final ScoreDoc[] hits;
    boolean unsetShardIndex = false;
    if (availHitCount <= start) {
    hits = new ScoreDoc[0];
    } else {
    hits = new ScoreDoc[Math.min(size, availHitCount - start)];
    int requestedResultWindow = start + size;
    int numIterOnHits = Math.min(availHitCount, requestedResultWindow);
    int hitUpto = 0;
    while (hitUpto < numIterOnHits) {
    assert queue.size() > 0;
    ShardRef ref = queue.top();
    final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex++];

    // Irrespective of whether we use shard indices for tie breaking or not, we check for
    // consistent
    // order in shard indices to defend against potential bugs
    if (hitUpto > 0) {
    if (unsetShardIndex != (hit.shardIndex == -1)) {
    throw new IllegalArgumentException("Inconsistent order of shard indices");
    }
    }

    unsetShardIndex |= hit.shardIndex == -1;

    if (hitUpto >= start) {
    hits[hitUpto - start] = hit;
    }

    hitUpto++;

    if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) {
    // Not done with this these TopDocs yet:
    queue.updateTop();
    } else {
    queue.pop();
    }
    }
    }

    TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation);
    if (sort == null) {
    return new TopDocs(totalHits, hits);
    } else {
    return new TopFieldDocs(totalHits, hits, sort.getSort());
    }
    }

Fetching a document by docid

    fieldsStream.seek(startPointer);
    decompressor.decompress(fieldsStream, totalLength, offset, length, bytes);
    assert bytes.length == length;
    documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length);

Stack trace:

    main[1] where
    [1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:576)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:594)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
    [4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
    [5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)



Loading files into memory with mmap:

    Breakpoint hit: "thread=main", org.apache.lucene.store.ByteBufferIndexInput.setCurBuf(), line=86 bci=0
    86 this.curBuf = curBuf;

    main[1] where
    [1] org.apache.lucene.store.ByteBufferIndexInput.setCurBuf (ByteBufferIndexInput.java:86)
    [2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.<init> (ByteBufferIndexInput.java:556)
    [3] org.apache.lucene.store.ByteBufferIndexInput.newInstance (ByteBufferIndexInput.java:63)
    [4] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:238)
    [5] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
    [6] org.apache.lucene.index.SegmentInfos.readCommit (SegmentInfos.java:290)
    [7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:88)
    [8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:798)
    [10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)


Clearly, the file gets opened in org.apache.lucene.store.MMapDirectory.openInput; this is where the index file is actually opened and memory-mapped.

First, the segments_1 file is opened:

    main[1] print name
    name = "segments_1"
    main[1] list
    228
    229 /** Creates an IndexInput for the file with the given name. */
    230 @Override
    231 public IndexInput openInput(String name, IOContext context) throws IOException {
    232 => ensureOpen();
    233 ensureCanRead(name);
    234 Path path = directory.resolve(name);
    235 try (FileChannel c = FileChannel.open(path, StandardOpenOption.READ)) {
    236 final String resourceDescription = "MMapIndexInput(path=\"" + path.toString() + "\")";
    237 final boolean useUnmap = getUseUnmap();
    main[1]
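The same open path can also be triggered directly from user code. A minimal hedged sketch (the index path is illustrative) that opens an index through MMapDirectory, which is what makes DirectoryReader.open end up in MMapDirectory.openInput for segments_N:

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.MMapDirectory;

public class OpenWithMmap {
    public static void main(String[] args) throws Exception {
        try (MMapDirectory dir = new MMapDirectory(Paths.get("index"));   // maps index files on demand
             DirectoryReader reader = DirectoryReader.open(dir)) {        // reads segments_N via openInput
            System.out.println("maxDoc=" + reader.maxDoc());
        }
    }
}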

Example: reading a string field:

private static void readField(DataInput in, StoredFieldVisitor visitor, FieldInfo info, int bits)
    throws IOException {
  switch (bits & TYPE_MASK) {
    case BYTE_ARR:
      int length = in.readVInt();
      byte[] data = new byte[length];
      in.readBytes(data, 0, length);
      visitor.binaryField(info, data);
      break;
    case STRING:
      visitor.stringField(info, in.readString());
      break;
    case NUMERIC_INT:
      visitor.intField(info, in.readZInt());
      break;
    case NUMERIC_FLOAT:
      visitor.floatField(info, readZFloat(in));
      break;
    case NUMERIC_LONG:
      visitor.longField(info, readTLong(in));
      break;
    case NUMERIC_DOUBLE:
      visitor.doubleField(info, readZDouble(in));
      break;
    default:
      throw new AssertionError("Unknown type flag: " + Integer.toHexString(bits));
  }
}
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
    [3] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [4] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [5] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [6] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1]

    main[1] list
    66 }
    67
    68 @Override
    69 public void stringField(FieldInfo fieldInfo, String value) throws IOException {
    70 => final FieldType ft = new FieldType(TextField.TYPE_STORED);
    71 ft.setStoreTermVectors(fieldInfo.hasVectors());
    72 ft.setOmitNorms(fieldInfo.omitsNorms());
    73 ft.setIndexOptions(fieldInfo.getIndexOptions());
    74 doc.add(
    75 new StoredField(
    main[1] print value
    value = "/home/dai/docs/aaa.txt"
    main[1] where
    [1] org.apache.lucene.document.DocumentStoredFieldVisitor.stringField (DocumentStoredFieldVisitor.java:70)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
    [4] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [5] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [6] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [7] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


The string that was read is then loaded into the doc object.

This is the core routine: the file is read through mmap, seek positions the stream at the computed offset and length, the stored bytes are decompressed, and the result is wrapped into an object.


/**
 * Get the serialized representation of the given docID. This docID has to be contained in the
 * current block.
 */
SerializedDocument document(int docID) throws IOException {
  if (contains(docID) == false) {
    throw new IllegalArgumentException();
  }

  final int index = docID - docBase;
  final int offset = Math.toIntExact(offsets[index]);
  final int length = Math.toIntExact(offsets[index + 1]) - offset;
  final int totalLength = Math.toIntExact(offsets[chunkDocs]);
  final int numStoredFields = Math.toIntExact(this.numStoredFields[index]);

  final BytesRef bytes;
  if (merging) {
    bytes = this.bytes;
  } else {
    bytes = new BytesRef();
  }
  ...
    fieldsStream.seek(startPointer); // seek to the computed start offset
    decompressor.decompress(fieldsStream, totalLength, offset, length, bytes); // decompress the stored bytes
    assert bytes.length == length;
    documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length); // wrap the bytes for the visitor to consume
  }

  return new SerializedDocument(documentInput, length, numStoredFields);
}
}
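For completeness, the visitor machinery can be driven directly from user code as well. A small hedged sketch (the field name is illustrative) that loads only the "path" stored field of one document through a DocumentStoredFieldVisitor:

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
import org.apache.lucene.index.IndexReader;

static Document loadPathOnly(IndexReader reader, int docID) throws IOException {
    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor("path"); // visit only "path"
    reader.document(docID, visitor);   // drives visitDocument/readField shown above
    return visitor.getDocument();      // the Document assembled by stringField(...)
}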

Fetching the matching docs

    Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance(), line=498 bci=0
    498 if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance (Lucene90PostingsReader.java:498)
    [2] org.apache.lucene.index.SlowImpactsEnum.advance (SlowImpactsEnum.java:77)
    [3] org.apache.lucene.search.ImpactsDISI.advance (ImpactsDISI.java:128)
    [4] org.apache.lucene.search.ImpactsDISI.nextDoc (ImpactsDISI.java:133)
    [5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:301)
    [6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Term query and iteration

Note that ImpactsEnum implements the DocIdSetIterator contract.

    1,138      }
    1,139
    1,140 @Override
    1,141 public ImpactsEnum impacts(int flags) throws IOException {
    1,142 => assert !eof;
    1,143 // if (DEBUG) {
    1,144 // System.out.println("BTTR.docs seg=" + segment);
    1,145 // }
    1,146 currentFrame.decodeMetaData();
    1,147 // if (DEBUG) {
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.impacts (SegmentTermsEnum.java:1,142)
    [2] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:114)
    [3] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Note that PostingsEnum is also a DocIdSetIterator.
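To make that iterator contract concrete, here is a minimal hedged sketch (field and term are illustrative) that seeks a single term and walks its postings through TermsEnum/PostingsEnum, the same DocIdSetIterator API used during scoring:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

static void walkPostings(IndexReader reader, String field, String term) throws IOException {
    Terms terms = MultiTerms.getTerms(reader, field);   // merged view over all segments
    if (terms == null) return;
    TermsEnum te = terms.iterator();
    if (te.seekExact(new BytesRef(term))) {
        PostingsEnum pe = te.postings(null, PostingsEnum.FREQS);
        int doc;
        while ((doc = pe.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            System.out.println("doc=" + doc + " freq=" + pe.freq());
        }
    }
}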

Sorting: the top-k merge


    main[1] where
    [1] org.apache.lucene.util.PriorityQueue.upHeap (PriorityQueue.java:276)
    [2] org.apache.lucene.util.PriorityQueue.add (PriorityQueue.java:161)
    [3] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:303)
    [4] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
    [5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
    [6] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [9] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [11] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


@Override
public boolean lessThan(ShardRef first, ShardRef second) {
  assert first != second;
  ScoreDoc firstScoreDoc = shardHits[first.shardIndex][first.hitIndex];
  ScoreDoc secondScoreDoc = shardHits[second.shardIndex][second.hitIndex];
  if (firstScoreDoc.score < secondScoreDoc.score) {
    return false;
  } else if (firstScoreDoc.score > secondScoreDoc.score) {
    return true;
  } else {
    return tieBreakLessThan(first, firstScoreDoc, second, secondScoreDoc, tieBreakerComparator);
  }
}
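The stack shows this comparator being driven from TopDocs.merge. A minimal hedged sketch of calling that merge yourself (shardHits would be per-shard results with consistent ScoreDoc.shardIndex values; shard-index handling is glossed over here):

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

class MergeSketch {
    static void printTop10(TopDocs[] shardHits) {
        TopDocs merged = TopDocs.merge(10, shardHits);   // rebuilds a single top-10 via the PriorityQueue above
        for (ScoreDoc sd : merged.scoreDocs) {
            System.out.println("doc=" + sd.doc + " shard=" + sd.shardIndex + " score=" + sd.score);
        }
    }
}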

Further reading

    Storage duration

This describes the lifetime of an object. There are four categories:

  • automatic: the lifetime is the enclosing block; storage is allocated on block entry and released on block exit
  • static: the lifetime is the whole program; storage is allocated before the program starts and released after it ends (this has little to do with the static keyword itself)
  • thread: the lifetime runs from thread start to thread end
  • dynamic: the lifetime is managed explicitly, typically via new or malloc

    Linkage

Linkage describes the visibility of a name across the program. There are three kinds:

  • no linkage: visible only in the current block
  • internal linkage: visible within the current translation unit
  • external linkage: visible from other translation units

Further reading

Background

I need to build Lucene and get familiar with its code.

Building

Lucene pins the toolchain version, so I had to switch to JDK 17; my machine had JDK 18.

Cloning the code

## clone the code
git clone https://github.com/apache/lucene.git

### change into the directory
cd lucene

### build
./gradlew

## if you are behind a firewall, a proxy speeds this up
## specify the proxy host and port
./gradlew -DsocksProxyHost=192.168.1.102 -DsocksProxyPort=1081


Running and testing

### package the demo
./gradlew lucene:demo:jar

### run the demo
java -cp /home/ubuntu/lucene-9.1.0/lucene/demo/build/classes/java/main:/home/ubuntu/lucene-9.1.0/lucene/core/build/classes/java/main/ org.apache.lucene.demo.IndexFiles -

On Ubuntu, switching to JDK 17 works as follows:

### install JDK 17
sudo apt install openjdk-17-jdk
# Configure Java (switch the default java)
sudo update-alternatives --config java

# Configure Java Compiler (switch the default javac)
sudo update-alternatives --config javac


### verify after switching: java is now 17
    java --version
    openjdk 17.0.3 2022-04-19
    OpenJDK Runtime Environment (build 17.0.3+7-Ubuntu-0ubuntu0.22.04.1)
    OpenJDK 64-Bit Server VM (build 17.0.3+7-Ubuntu-0ubuntu0.22.04.1, mixed mode, sharing)

Errors encountered

gradle-wrapper.jar could not be downloaded, so fetch it while skipping certificate checks:

    wget --no-check-certificate  https://raw.githubusercontent.com/gradle/gradle/v7.3.3/gradle/wrapper/gradle-wrapper.jar

Then place it under {$luceneGitDir}/gradle/wrapper/, where luceneGitDir is the directory you cloned Lucene into.

Relevant code

IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
iwc.setUseCompoundFile(false); // write separate per-extension files instead of a single compound file
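For context, a hedged sketch of how such a config is typically used when indexing (paths and field names are illustrative, loosely following the demo's IndexFiles):

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;

public class IndexSketch {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
        iwc.setUseCompoundFile(false);   // keep _0.fdt, _0.fdx, ... as separate files for inspection
        try (FSDirectory dir = FSDirectory.open(Paths.get("index"));
             IndexWriter writer = new IndexWriter(dir, iwc)) {
            Document doc = new Document();
            doc.add(new StringField("path", "/home/dai/docs/aaa.txt", Field.Store.YES));
            doc.add(new TextField("contents", "hello world", Field.Store.NO));
            writer.addDocument(doc);     // ends up in the stored-fields writer traced below
            writer.commit();
        }
    }
}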

Writing the header

The corresponding jdb session:

    main[1] stop in  org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136
    Deferring breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136.
    It will be set after the class is loaded.
    main[1] cont
    > Set deferred breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter:136

    Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.<init>(), line=136 bci=180
    136 CodecUtil.writeIndexHeader(

    main[1] list
    132
    133 fieldsStream =
    134 directory.createOutput(
    135 IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION), context);
    136 => CodecUtil.writeIndexHeader(
    137 fieldsStream, formatName, VERSION_CURRENT, si.getId(), segmentSuffix);
    138 assert CodecUtil.indexHeaderLength(formatName, segmentSuffix)
    139 == fieldsStream.getFilePointer();
    140
    141 indexWriter =
    main[1] print formatName
    formatName = "Lucene90StoredFieldsFastData"

The corresponding stack:

    [1] org.apache.lucene.store.OutputStreamIndexOutput.writeByte (OutputStreamIndexOutput.java:54)
    [2] org.apache.lucene.codecs.CodecUtil.writeBEInt (CodecUtil.java:653)
    [3] org.apache.lucene.codecs.CodecUtil.writeHeader (CodecUtil.java:82)
    [4] org.apache.lucene.codecs.CodecUtil.writeIndexHeader (CodecUtil.java:125)
    [5] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.<init> (Lucene90CompressingStoredFieldsWriter.java:128)
    [6] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat.fieldsWriter (Lucene90CompressingStoredFieldsFormat.java:140)
    [7] org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat.fieldsWriter (Lucene90StoredFieldsFormat.java:154)
    [8] org.apache.lucene.index.StoredFieldsConsumer.initStoredFieldsWriter (StoredFieldsConsumer.java:49)
    [9] org.apache.lucene.index.StoredFieldsConsumer.startDocument (StoredFieldsConsumer.java:56)
    [10] org.apache.lucene.index.IndexingChain.startStoredFields (IndexingChain.java:556)
    [11] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:587)
    [12] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [13] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [14] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [15] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [16] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
    [17] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
    [18] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [19] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [20] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [21] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [22] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [23] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)


Inverted index

    main[1] where
    [1] org.apache.lucene.index.TermsHashPerField.initStreamSlices (TermsHashPerField.java:150)
    [2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:198)
    [3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
    [4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
    [5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
    [11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [13] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [14] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)


Writing the stored field contents

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField (Lucene90CompressingStoredFieldsWriter.java:276)
    [2] org.apache.lucene.index.StoredFieldsConsumer.writeField (StoredFieldsConsumer.java:65)
    [3] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:749)
    [4] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [5] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [6] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [7] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [8] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [9] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
    [10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
    [11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [13] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [14] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)


Inspecting the .fdt file

    hexdump -C _0.fdt
    00000000 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f |?.l..Lucene90Sto|
    00000010 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74 |redFieldsFastDat|
    00000020 61 00 00 00 01 85 88 12 2b 0c 73 6b 95 30 38 76 |a.......+.sk.08v|
    00000030 c9 0a 2a 52 29 00 00 0a 00 01 00 1c 02 06 03 07 |..*R)...........|
    00000040 07 07 07 07 07 07 07 07 20 00 1a 60 2f 68 6f 6d |........ ..`/hom|
    00000050 65 2f 60 75 62 75 6e 74 75 60 2f 64 6f 63 2f 6d |e/`ubuntu`/doc/m|
    00000060 60 6f 6e 67 6f 2e 74 60 78 74 00 1a 2f 68 60 6f |`ongo.t`xt../h`o|
    00000070 6d 65 2f 75 62 60 75 6e 74 75 2f 64 60 6f 63 2f |me/ub`untu/d`oc/|
    00000080 68 65 6c 60 6c 6f 2e 74 78 74 c0 28 93 e8 00 00 |hel`lo.txt.(....|
    00000090 00 00 00 00 00 00 c8 75 0a 41 |.......u.A|
    0000009a

The .fdt layout

Now let's decode the .fdt format; [1-4] means bytes 1 through 4.

[1-4] The first four bytes are the big-endian magic number CODEC_MAGIC = 0x3fd76c17.
[5-33] Byte 5 is the string length (hex 1c = decimal 28); bytes [6-33] are the string itself, the 28-character codec name Lucene90StoredFieldsFastData.
[34-37] After the string comes the hard-coded version, a big-endian 1.
[38-53] A 16-byte unique id identifying this file.

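A small, pure-JDK sketch that checks this layout against the hexdump above (an illustrative reader, assuming the codec-name length fits into a single VInt byte, which is the case here):

import java.io.DataInputStream;
import java.io.FileInputStream;
import java.nio.charset.StandardCharsets;

public class FdtHeaderDump {
    public static void main(String[] args) throws Exception {
        try (DataInputStream in = new DataInputStream(new FileInputStream("_0.fdt"))) {
            int magic = in.readInt();              // bytes 1-4: big-endian CODEC_MAGIC
            int nameLen = in.readUnsignedByte();   // byte 5: VInt length, 0x1c = 28
            byte[] name = new byte[nameLen];
            in.readFully(name);                    // bytes 6-33: "Lucene90StoredFieldsFastData"
            int version = in.readInt();            // bytes 34-37: big-endian version
            byte[] id = new byte[16];
            in.readFully(id);                      // bytes 38-53: unique id
            System.out.printf("magic=0x%x name=%s version=%d%n",
                    magic, new String(name, StandardCharsets.UTF_8), version);
        }
    }
}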

Buffer pools

TermsHashPerField holds three buffer pools: intPool, bytePool, and termBytePool.

TermsHashPerField(
    int streamCount,
    IntBlockPool intPool,
    ByteBlockPool bytePool,
    ByteBlockPool termBytePool,
    Counter bytesUsed,
    TermsHashPerField nextPerField,
    String fieldName,
    IndexOptions indexOptions) {
  this.intPool = intPool;
  this.bytePool = bytePool;
  this.streamCount = streamCount;
  this.fieldName = fieldName;
  this.nextPerField = nextPerField;
  assert indexOptions != IndexOptions.NONE;
  this.indexOptions = indexOptions;
  PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
  bytesHash = new BytesRefHash(termBytePool, HASH_INIT_SIZE, byteStarts);
}


Generating terms

    main[1] where
    [1] org.apache.lucene.util.BytesRefHash.add (BytesRefHash.java:247)
    [2] org.apache.lucene.index.TermsHashPerField.add (TermsHashPerField.java:193)
    [3] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,224)
    [4] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
    [5] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [6] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [7] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [8] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [9] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
    [11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [13] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [14] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)


Term lookup (seekExact)

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:476)
    [2] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
    [3] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [4] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [5] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Further reading

Background

I want to get familiar with ClickHouse internals.

Implementation

// ClickHouse/src/Functions/isNaN.cpp
// add a struct
struct testtrue
{
    static constexpr auto name = "testtrue"; // function name
    template <typename T>
    static bool execute(const T t)           // per-value callback
    {
        /// Suppression for PVS-Studio.
        return true || t;
    }
};
// alias
using FunctionTestTure = FunctionNumericPredicate<testtrue>;


void registerFunctionIsNaN(FunctionFactory & factory)
{
    factory.registerFunction<FunctionIsNaN>();
    factory.registerFunction<FunctionTestTure>(); // register the new function
}

Rebuild and call it:

    SELECT testtrue(2)

    Query id: 87e4625d-8b79-4c3a-8153-b333d6a0614f

    ┌─testtrue(2)─┐
    │ 1 │
    └─────────────┘


Registration path

    (lldb) bt
    * thread #1, name = 'clickhouse-serv', stop reason = breakpoint 1.1
    * frame #0: 0x00000000148ee2ac clickhouse-server`DB::registerFunctionIsNaN(factory=0x000000002f984a50) at isNaN.cpp:43:5
    frame #1: 0x0000000010339e2c clickhouse-server`DB::registerFunctionsMiscellaneous(factory=0x000000002f984a50) at registerFunctionsMiscellaneous.cpp:128:5
    frame #2: 0x00000000103391a0 clickhouse-server`DB::registerFunctions() at registerFunctions.cpp:96:5
    frame #3: 0x000000000d063d35 clickhouse-server`DB::Server::main(this=0x00007fffffffd9b8, (null)=size=0) at Server.cpp:623:5
    frame #4: 0x00000000230abec5 clickhouse-server`Poco::Util::Application::run(this=0x00007fffffffd9b8) at Application.cpp:334:8
    frame #5: 0x000000000d06250b clickhouse-server`DB::Server::run(this=0x00007fffffffd9b8) at Server.cpp:461:25
    frame #6: 0x00000000230c6c70 clickhouse-server`Poco::Util::ServerApplication::run(this=0x00007fffffffd9b8, argc=1, argv=0x00007ffff70f7038) at ServerApplication.cpp:611:9
    frame #7: 0x000000000d05f8e1 clickhouse-server`mainEntryClickHouseServer(argc=1, argv=0x00007ffff70f7038) at Server.cpp:187:20
    frame #8: 0x000000000cf7fe63 clickhouse-server`main(argc_=1, argv_=0x00007fffffffdfb8) at main.cpp:409:12
    frame #9: 0x00007ffff7d92d90 libc.so.6`__libc_start_call_main(main=(clickhouse-server`main at main.cpp:380), argc=1, argv=0x00007fffffffdfb8) at libc_start_call_main.h:58:16
    frame #10: 0x00007ffff7d92e40 libc.so.6`__libc_start_main_impl(main=(clickhouse-server`main at main.cpp:380), argc=1, argv=0x00007fffffffdfb8, init=0x00007ffff7ffd040, fini=<unavailable>, rtld_fini=<unavailable>, stack_end=0x00007fffffffdfa8) at libc-start.c:392:3
    frame #11: 0x000000000cf7fb55 clickhouse-server`_start + 37


Function call path

Set a breakpoint with lldb:

    (lldb) b DB::(anonymous namespace)::testtrue::execute


The call stack:

    (lldb) bt
    * thread #3, name = 'TCPHandler', stop reason = breakpoint 3.1
    * frame #0: 0x00000000148f3dca clickhouse-server`bool DB::(anonymous namespace)::testtrue::execute<char8_t>(t=0x02 u8'\U00000002') at isNaN.cpp:28:9
    frame #1: 0x00000000148f29dd clickhouse-server`COW<DB::IColumn>::immutable_ptr<DB::IColumn> DB::FunctionNumericPredicate<DB::(anonymous namespace)::testtrue>::execute<char8_t>(this=0x00007fff1e44db58, in_untyped=0x00007fff1e447820) const at FunctionNumericPredicate.h:89:31
    frame #2: 0x00000000148f1e8a clickhouse-server`DB::FunctionNumericPredicate<DB::(anonymous namespace)::testtrue>::executeImpl(this=0x00007fff1e44db58, arguments=size=1, (null)=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, (null)=1) const at FunctionNumericPredicate.h:61:22
    frame #3: 0x00000000103a611c clickhouse-server`DB::IFunction::executeImplDryRun(this=0x00007fff1e44db58, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1) const at IFunction.h:395:16
    frame #4: 0x00000000103a484d clickhouse-server`DB::FunctionToExecutableFunctionAdaptor::executeDryRunImpl(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1) const at IFunctionAdaptors.h:26:26
    frame #5: 0x000000001a8fa3d9 clickhouse-server`DB::IExecutableFunction::executeWithoutLowCardinalityColumns(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:217:15
    frame #6: 0x000000001a8fa00a clickhouse-server`DB::IExecutableFunction::defaultImplementationForConstantArguments(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:160:31
    frame #7: 0x000000001a8fa2c4 clickhouse-server`DB::IExecutableFunction::executeWithoutLowCardinalityColumns(this=0x00007fff1e44dba0, args=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:209:20
    frame #8: 0x000000001a8faf55 clickhouse-server`DB::IExecutableFunction::executeWithoutSparseColumns(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:267:22
    frame #9: 0x000000001a8fbe43 clickhouse-server`DB::IExecutableFunction::execute(this=0x00007fff1e44dba0, arguments=size=1, result_type=std::__1::shared_ptr<const DB::IDataType>::element_type @ 0x00007fff1e4899c8 strong=2 weak=2, input_rows_count=1, dry_run=true) const at IFunction.cpp:337:16
    frame #10: 0x000000001b02a7b8 clickhouse-server`DB::ActionsDAG::addFunction(this=0x00007fff1e4ac248, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, children=size=0, result_name="testtrue(2)") at ActionsDAG.cpp:199:37
    frame #11: 0x000000001cdad1a1 clickhouse-server`DB::ScopeStack::addFunction(this=0x00007fff2d1e96f0, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, argument_names=size=1, result_name="") at ActionsVisitor.cpp:598:51
    frame #12: 0x000000001cdb7485 clickhouse-server`DB::ActionsMatcher::Data::addFunction(this=0x00007fff2d1e9698, function=std::__1::shared_ptr<DB::IFunctionOverloadResolver>::element_type @ 0x00007fff1e44dde0 strong=2 weak=1, argument_names=size=1, result_name=<unavailable>) at ActionsVisitor.h:140:27
    frame #13: 0x000000001cdb0a2b clickhouse-server`DB::ActionsMatcher::visit(node=0x00007fff2a45c9b8, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff2a45c9b8 strong=1 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:1093:14
    frame #14: 0x000000001cdad64d clickhouse-server`DB::ActionsMatcher::visit(ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff2a45c9b8 strong=1 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:655:9
    frame #15: 0x000000001cdb125c clickhouse-server`DB::ActionsMatcher::visit(expression_list=0x00007fff1e4895b8, (null)=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:763:17
    frame #16: 0x000000001cdad6b9 clickhouse-server`DB::ActionsMatcher::visit(ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, data=0x00007fff2d1e9698) at ActionsVisitor.cpp:659:9
    frame #17: 0x000000001b1ebce5 clickhouse-server`DB::InDepthNodeVisitor<DB::ActionsMatcher, true, false, std::__1::shared_ptr<DB::IAST> const>::visit(this=0x00007fff2d1e9638, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2) at InDepthNodeVisitor.h:34:13
    frame #18: 0x000000001b1dc0ea clickhouse-server`DB::ExpressionAnalyzer::getRootActions(this=0x00007fff1e47f780, ast=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e4895b8 strong=2 weak=2, no_makeset_for_subqueries=false, actions=nullptr, only_consts=false) at ExpressionAnalyzer.cpp:587:48
    frame #19: 0x000000001b1e2f9e clickhouse-server`DB::SelectQueryExpressionAnalyzer::appendSelect(this=0x00007fff1e47f780, chain=0x00007fff2d1ea5a0, only_types=false) at ExpressionAnalyzer.cpp:1383:5
    frame #20: 0x000000001b1e6da8 clickhouse-server`DB::ExpressionAnalysisResult::ExpressionAnalysisResult(this=0x00007fff2d1eaa88, query_analyzer=0x00007fff1e47f780, metadata_snapshot=std::__1::shared_ptr<const DB::StorageInMemoryMetadata>::element_type @ 0x00007ffff705a800 strong=4 weak=1, first_stage_=true, second_stage_=true, only_types=false, filter_info_=nullptr, source_header=0x00007fff1e4c9550) at ExpressionAnalyzer.cpp:1830:24
    frame #21: 0x000000001b57ab9d clickhouse-server`DB::InterpreterSelectQuery::getSampleBlockImpl(this=0x00007fff1e4c9000) at InterpreterSelectQuery.cpp:692:23
    frame #22: 0x000000001b5747f9 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff2d1ec148, try_move_to_prewhere=true)::$_1::operator()(bool) const at InterpreterSelectQuery.cpp:552:25
    frame #23: 0x000000001b5709f6 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff1e4c9000, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, input_pipe_= Has Value=false , storage_=nullptr, options_=0x00007fff1e461770, required_result_column_names=size=0, metadata_snapshot_=nullptr, subquery_for_sets_=size=0, prepared_sets_=size=0) at InterpreterSelectQuery.cpp:555:5
    frame #24: 0x000000001b56edf3 clickhouse-server`DB::InterpreterSelectQuery::InterpreterSelectQuery(this=0x00007fff1e4c9000, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, options_=0x00007fff1e461770, required_result_column_names_=size=0) at InterpreterSelectQuery.cpp:165:7
    frame #25: 0x000000001b5f4ce5 clickhouse-server`std::__1::__unique_if<DB::InterpreterSelectQuery>::__unique_single std::__1::make_unique<DB::InterpreterSelectQuery, std::__1::shared_ptr<DB::IAST> const&, std::__1::shared_ptr<DB::Context>&, DB::SelectQueryOptions&, std::__1::vector<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >, std::__1::allocator<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > > > const&>(__args=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, __args=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e495000 strong=3 weak=2, __args=0x00007fff1e461770, __args=size=0) at unique_ptr.h:725:32
    frame #26: 0x000000001b5f2d09 clickhouse-server`DB::InterpreterSelectWithUnionQuery::buildCurrentChildInterpreter(this=0x00007fff1e461700, ast_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48e198 strong=1 weak=2, current_required_result_column_names=size=0) at InterpreterSelectWithUnionQuery.cpp:223:16
    frame #27: 0x000000001b5f23ed clickhouse-server`DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(this=0x00007fff1e461700, query_ptr_=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, context_=std::__1::shared_ptr<const DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, options_=0x00007fff2d1ef588, required_result_column_names=size=0) at InterpreterSelectWithUnionQuery.cpp:140:13
    frame #28: 0x000000001b53b830 clickhouse-server`std::__1::__unique_if<DB::InterpreterSelectWithUnionQuery>::__unique_single std::__1::make_unique<DB::InterpreterSelectWithUnionQuery, std::__1::shared_ptr<DB::IAST>&, std::__1::shared_ptr<DB::Context>&, DB::SelectQueryOptions const&>(__args=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, __args=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, __args=0x00007fff2d1ef588) at unique_ptr.h:725:32
    frame #29: 0x000000001b539e5d clickhouse-server`DB::InterpreterFactory::get(query=std::__1::shared_ptr<DB::IAST>::element_type @ 0x00007fff1e48f218 strong=2 weak=2, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, options=0x00007fff2d1ef588) at InterpreterFactory.cpp:122:16
    frame #30: 0x000000001b961577 clickhouse-server`DB::executeQueryImpl(begin="select testtrue(2);", end="", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, internal=false, stage=Complete, istr=0x0000000000000000) at executeQuery.cpp:658:27
    frame #31: 0x000000001b95ee64 clickhouse-server`DB::executeQuery(query="select testtrue(2);", context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff1e492800 strong=5 weak=9, internal=false, stage=Complete) at executeQuery.cpp:1067:30
    frame #32: 0x000000001c590f25 clickhouse-server`DB::TCPHandler::runImpl(this=0x00007fff1e46e000) at TCPHandler.cpp:332:24
    frame #33: 0x000000001c59f9e5 clickhouse-server`DB::TCPHandler::run(this=0x00007fff1e46e000) at TCPHandler.cpp:1781:9
    frame #34: 0x0000000023091f79 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007fff1e46e000) at TCPServerConnection.cpp:43:3
    frame #35: 0x0000000023092786 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff26648600) at TCPServerDispatcher.cpp:115:20
    frame #36: 0x00000000232d2a94 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702df80) at ThreadPool.cpp:199:14
    frame #37: 0x00000000232cf5ba clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001330) at Thread.cpp:55:11
    frame #38: 0x00000000232ce39e clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702dfb8) at Thread_POSIX.cpp:345:27
    frame #39: 0x00007ffff7dfdb43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
    frame #40: 0x00007ffff7e8fa00 libc.so.6`__clone3 at clone3.S:81


How functions are implemented

All functions inherit from IFunction.

// ClickHouse/src/Functions/IFunction.h

class IFunction
{
public:

    virtual ~IFunction() = default;

    virtual String getName() const = 0;

    virtual ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const = 0;
    virtual ColumnPtr executeImplDryRun(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const
    {
        return executeImpl(arguments, result_type, input_rows_count);
    }
    ...
};

using FunctionPtr = std::shared_ptr<IFunction>;
The core is the virtual method executeImpl.

Now look at the template class that implements it:

// ClickHouse/src/Functions/FunctionNumericPredicate.h
ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr &, size_t /*input_rows_count*/) const override
{
    const auto * in = arguments.front().column.get();

    ColumnPtr res;
    if (!((res = execute<UInt8>(in))
        || (res = execute<UInt16>(in))
        || (res = execute<UInt32>(in))
        || (res = execute<UInt64>(in))
        || (res = execute<Int8>(in))
        || (res = execute<Int16>(in))
        || (res = execute<Int32>(in))
        || (res = execute<Int64>(in))
        || (res = execute<Float32>(in))
        || (res = execute<Float64>(in))))
        throw Exception{"Illegal column " + in->getName() + " of first argument of function " + getName(), ErrorCodes::ILLEGAL_COLUMN};

    return res;
}

template <typename T>
ColumnPtr execute(const IColumn * in_untyped) const
{
    if (const auto in = checkAndGetColumn<ColumnVector<T>>(in_untyped))
    {
        const auto size = in->size();

        auto out = ColumnUInt8::create(size);

        const auto & in_data = in->getData();
        auto & out_data = out->getData();

        for (const auto i : collections::range(0, size))
            out_data[i] = Impl::execute(in_data[i]);

        return out;
    }

    return nullptr;
}

Background

Our current workload sends requests to ClickHouse with curl and writes data into it, so I was curious about the full lifecycle of a ClickHouse request.

The request

Parsing the HTTP request

The request payload:

echo -ne '1,Hello\n2,World\n' | curl -sSF 'file=@-' "http://localhost:8123/?query=SELECT+*+FROM+file&file_format=CSV&file_types=UInt8,String";

The request as seen inside ClickHouse:

    (lldb) p in
    (DB::PeekableReadBuffer) $3 = {
    DB::BufferWithOwnMemory<DB::ReadBuffer> = {
    DB::ReadBuffer = {
    DB::BufferBase = {
    pos = 0x0000398b7c4ea0fe "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n"
    bytes = 0
    working_buffer = (begin_pos = "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n", end_pos = "")
    internal_buffer = (begin_pos = "--------------------------0c8b1c0a5e3c9b36\r\nContent-Disposition: form-data; name=\"file\"; filename=\"-\"\r\n\r\n1,Hello\n2,World\n\r\n--------------------------0c8b1c0a5e3c9b36--\r\n", end_pos = "")
    padded = false
    }
    nextimpl_working_buffer_offset = 0
    }
    memory = (m_capacity = 0, m_size = 0, m_data = 0x0000000000000000, alignment = 0)
    }
    sub_buf = 0x00007fff2628c080
    peeked_size = 0
    checkpoint = Has Value=false {}
    checkpoint_in_own_memory = false
    stack_memory = "'\xf9\f\0\0\0\0(\0\0\0\0\0\0\0 "
    use_stack_memory = true
    }
    (lldb) bt
    * thread #4, name = 'HTTPHandler', stop reason = step over
    * frame #0: 0x000000001c5a98dc clickhouse-server`DB::HTMLForm::MultipartReadBuffer::readLine(this=0x00007fff2c9eede8, append_crlf=true) at HTMLForm.cpp:271:9
    frame #1: 0x000000001c5a95df clickhouse-server`DB::HTMLForm::MultipartReadBuffer::skipToNextBoundary(this=0x00007fff2c9eede8) at HTMLForm.cpp:253:21
    frame #2: 0x000000001c5a8ad4 clickhouse-server`DB::HTMLForm::readMultipart(this=0x00007fff2c9f0e10, in_=0x00007fff2628c080, handler=0x00007fff2c9ef1f0) at HTMLForm.cpp:186:13
    frame #3: 0x000000001c5a7e39 clickhouse-server`DB::HTMLForm::load(this=0x00007fff2c9f0e10, request=0x00007fff2c9f1438, requestBody=0x00007fff2628c080, handler=0x00007fff2c9ef1f0) at HTMLForm.cpp:99:13
    frame #4: 0x000000001d3ba404 clickhouse-server`DB::DynamicQueryHandler::getQuery(this=0x00007fff262b4000, request=0x00007fff2c9f1438, params=0x00007fff2c9f0e10, context=std::__1::shared_ptr<DB::Context>::element_type @ 0x00007fff2628f800 strong=2 weak=6) at HTTPHandler.cpp:1032:12
    frame #5: 0x000000001d3b5ed4 clickhouse-server`DB::HTTPHandler::processQuery(this=0x00007fff262b4000, request=0x00007fff2c9f1438, params=0x00007fff2c9f0e10, response=0x00007fff2c9f14f0, used_output=0x00007fff2c9f0ec8, query_scope= Has Value=true ) at HTTPHandler.cpp:764:26
    frame #6: 0x000000001d3b90de clickhouse-server`DB::HTTPHandler::handleRequest(this=0x00007fff262b4000, request=0x00007fff2c9f1438, response=0x00007fff2c9f14f0) at HTTPHandler.cpp:960:9
    frame #7: 0x000000001d3f09f7 clickhouse-server`DB::HTTPServerConnection::run(this=0x00007fff2628c000) at HTTPServerConnection.cpp:65:34
    frame #8: 0x000000002308f119 clickhouse-server`Poco::Net::TCPServerConnection::start(this=0x00007fff2628c000) at TCPServerConnection.cpp:43:3
    frame #9: 0x000000002308f926 clickhouse-server`Poco::Net::TCPServerDispatcher::run(this=0x00007fff29fa8800) at TCPServerDispatcher.cpp:115:20
    frame #10: 0x00000000232cfc34 clickhouse-server`Poco::PooledThread::run(this=0x00007ffff702e200) at ThreadPool.cpp:199:14
    frame #11: 0x00000000232cc75a clickhouse-server`Poco::(anonymous namespace)::RunnableHolder::run(this=0x00007ffff7001350) at Thread.cpp:55:11
    frame #12: 0x00000000232cb53e clickhouse-server`Poco::ThreadImpl::runnableEntry(pThread=0x00007ffff702e238) at Thread_POSIX.cpp:345:27
    frame #13: 0x00007ffff7dfeb43 libc.so.6`start_thread(arg=<unavailable>) at pthread_create.c:442:8
    frame #14: 0x00007ffff7e90a00 libc.so.6`__clone3 at clone3.S:81

Building the syntax tree:

    std::tie(ast, streams) = executeQueryImpl(begin, end, context, false, QueryProcessingStage::Complete, &istr);

Background

Unit testing is a fundamental practice: knowing how to use unit tests is the basis of a well-maintained program, so maintaining a program through unit tests is only natural.

    gtest quick start

Further reading

Background

We are currently using ClickHouse; I wanted to set up an instance myself and create a table successfully.

    zookeeper

  • Step 1: download ZooKeeper.

  • Step 2: start ZooKeeper:

    #### go to the bin directory
    cd apache-zookeeper-3.8.0-bin/bin/
    ## start zk
    ./zkServer.sh

  • Step 3: create the zk node; the path is /path/to/zookeeper/node:

    ### start zkCli
    cd apache-zookeeper-3.8.0-bin/bin/
    ### launch the client
    ./zkCli.sh
    ### create the zk nodes, one level at a time
    ### create command: create path
    [zk: localhost:2181(CONNECTED) 11] create /path
    Created /path
    [zk: localhost:2181(CONNECTED) 12] create /path/to
    Created /path/to
    [zk: localhost:2181(CONNECTED) 13] create /path/to/zookeeper
    Created /path/to/zookeeper
    [zk: localhost:2181(CONNECTED) 14] create /path/to/zookeeper/node
    Created /path/to/zookeeper/node

Creating the table

Before creating the table, the cluster has to be configured. My configuration:

<!-- zookeeper config -->
<zookeeper>
    <node>
        <host>localhost</host>
        <port>2181</port>
    </node>
    <session_timeout_ms>30000</session_timeout_ms>
    <operation_timeout_ms>10000</operation_timeout_ms>
    <!-- Optional. Chroot suffix. Should exist. -->
    <root>/path/to/zookeeper/node</root>

</zookeeper>
<!-- macros: the variables in the table path are resolved from here -->
<macros>
    <cluster>testcluster</cluster>
    <shard>01</shard>
    <replica>example01-01-1</replica>
</macros>
<remote_servers>
    <!-- the cluster is named testcluster; the name is arbitrary -->
    <testcluster>
        <shard>
            <replica>
                <host>localhost</host>
                <port>9000</port>
            </replica>
        </shard>
    </testcluster>
</remote_servers>
<distributed_ddl>


The CREATE TABLE statement:

### {cluster} here is the testcluster configured above
    CREATE TABLE test ON CLUSTER `{cluster}`
    (
    `timestamp` DateTime,
    `contractid` UInt32,
    `userid` UInt32
    )
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/default/test', '{replica}')
    PARTITION BY toYYYYMM(timestamp)
    ORDER BY (contractid, toDate(timestamp), userid)
    SAMPLE BY userid

    Query id: 56c07fac-9a0b-4b0b-bf8f-fb808ce452e6


Querying the zk configuration

    SELECT  path  FROM system.zookeeper

Errors encountered

I hit this error: There is no DistributedDDL configuration in server config.

The cause: the ClickHouse configuration was incomplete; the distributed_ddl section below has to be added (see the reference linked above).

<distributed_ddl>
    <!-- Path in ZooKeeper to queue with DDL queries -->
    <path>/clickhouse/task_queue/ddl</path>
    <cleanup_delay_period>60</cleanup_delay_period>
    <task_max_lifetime>86400</task_max_lifetime>
    <max_tasks_in_queue>1000</max_tasks_in_queue>
</distributed_ddl>

Reproducing the problem

  • The CREATE TABLE statement:
    CREATE TABLE test ON CLUSTER `{cluster}`
    (
    `timestamp` DateTime,
    `contractid` UInt32,
    `userid` UInt32
    )
    ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/default/test', '{replica}')
    PARTITION BY toYYYYMM(timestamp)
    ORDER BY (contractid, toDate(timestamp), userid)
    SAMPLE BY userid

  • First insert:

    insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

  • The result is a single row:

      SELECT *  FROM test;

      ┌───────────timestamp─┬─contractid─┬─userid─┐
      │ 2022-02-02 00:00:00 │ 1 │ 1 │
      └─────────────────────┴────────────┴────────┘


Second insert

    insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');


The result is still a single row:

    :) insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

    INSERT INTO test (userid, contractid, timestamp) FORMAT Values

    Query id: 706e2447-95eb-4515-a7b7-cf363512b673

    Ok.

    1 row in set. Elapsed: 0.056 sec.

    dai-MS-7B89 :) select * from test

    SELECT *
    FROM test

    Query id: 3ba7cd7f-4621-4286-8646-79737ec3e763

    ┌───────────timestamp─┬─contractid─┬─userid─┐
    │ 2022-02-02 00:00:00 │ 1 │ 1 │
    └─────────────────────┴────────────┴────────┘

    1 row in set. Elapsed: 0.030 sec.


When the same data is inserted twice, ClickHouse deduplicates the block, so the two inserts end up storing only one row.

How to work around it

ClickHouse provides the setting insert_deduplicate to control whether inserted blocks are deduplicated:

    set insert_deduplicate=0;

After that, re-inserting the same row is no longer dropped by deduplication:

    insert into test ( userid ,contractid ,  timestamp ) values (1,1,'2022-02-02');

    INSERT INTO test (userid, contractid, timestamp) FORMAT Values

    Query id: a8df989b-0b63-4b45-a1b8-22c13b18bf0a

    Ok.

    1 row in set. Elapsed: 0.070 sec.

    dai-MS-7B89 :) select * from test

    SELECT *
    FROM test

    Query id: e077b55e-bfd9-4678-ae46-9fc05714b3f7

    ┌───────────timestamp─┬─contractid─┬─userid─┐
    │ 2022-02-02 00:00:00 │ 1 │ 1 │
    └─────────────────────┴────────────┴────────┘
    ┌───────────timestamp─┬─contractid─┬─userid─┐
    │ 2022-02-02 00:00:00 │ 1 │ 1 │
    └─────────────────────┴────────────┴────────┘

Log and source analysis

Log analysis

    2022.05.15 23:32:04.515912 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Trace> MergedBlockOutputStream: filled checksums 202202_2_2_0 (state Temporary)
    2022.05.15 23:32:04.517872 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Debug> default.test (7d656761-7cd0-4866-a43e-f0e4cea97654) (Replicated OutputStream): Wrote block with ID '202202_8166901380224458449_12408515745921908624', 1 rows
    2022.05.15 23:32:04.533981 [ 68323 ] {64b40d4f-0d00-4747-9af3-4afb56b6a84b} <Information> default.test (7d656761-7cd0-4866-a43e-f0e4cea97654) (Replicated OutputStream): Block with ID 202202_8166901380224458449_12408515745921908624 already exists locally as part 202202_0_0_0; ignoring it.



Debugging ClickHouse with lldb:

    lldb ./clickhouse-server
void ReplicatedMergeTreeSink::consume(Chunk chunk)
{
    auto block = getHeader().cloneWithColumns(chunk.detachColumns());

    String block_id;

    if (deduplicate) // derived from the insert_deduplicate setting above
    {
        String block_dedup_token; // build the dedup token

        /// We add the hash from the data and partition identifier to deduplication ID.
        /// That is, do not insert the same data to the same partition twice.

        const String & dedup_token = settings.insert_deduplication_token;
        if (!dedup_token.empty())
        {
            /// multiple blocks can be inserted within the same insert query
            /// an ordinal number is added to dedup token to generate a distinctive block id for each block
            block_dedup_token = fmt::format("{}_{}", dedup_token, chunk_dedup_seqnum);
            ++chunk_dedup_seqnum;
        }

        block_id = temp_part.part->getZeroLevelPartBlockID(block_dedup_token);
        LOG_DEBUG(log, "Wrote block with ID '{}', {} rows", block_id, current_block.block.rows());
    }
    else
    {
        LOG_DEBUG(log, "Wrote block with {} rows", current_block.block.rows());
    }

Further reading
Method* InstanceKlass::class_initializer() const {
  Method* clinit = find_method(
      vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
  if (clinit != NULL && clinit->has_valid_initializer_flags()) {
    return clinit;
  }
  return NULL;
}

Finding the method:

// src\hotspot\share\oops\instanceKlass.cpp

int InstanceKlass::find_method_index(const Array<Method*>* methods,
                                     const Symbol* name,
                                     const Symbol* signature,
                                     OverpassLookupMode overpass_mode,
                                     StaticLookupMode static_mode,
                                     PrivateLookupMode private_mode) {
  const bool skipping_overpass = (overpass_mode == OverpassLookupMode::skip);
  const bool skipping_static = (static_mode == StaticLookupMode::skip);
  const bool skipping_private = (private_mode == PrivateLookupMode::skip);
  const int hit = quick_search(methods, name);
  if (hit != -1) {
    const Method* const m = methods->at(hit);

    // Do linear search to find matching signature. First, quick check
    // for common case, ignoring overpasses if requested.
    if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
      return hit;
    }

    // search downwards through overloaded methods
    int i;
    for (i = hit - 1; i >= 0; --i) {
      const Method* const m = methods->at(i);
      assert(m->is_method(), "must be method");
      if (m->name() != name) {
        break;
      }
      if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
        return i;
      }
    }
    // search upwards
    for (i = hit + 1; i < methods->length(); ++i) {
      const Method* const m = methods->at(i);
      assert(m->is_method(), "must be method");
      if (m->name() != name) {
        break;
      }
      if (method_matches(m, signature, skipping_overpass, skipping_static, skipping_private)) {
        return i;
      }
    }
    // not found
#ifdef ASSERT
    const int index = (skipping_overpass || skipping_static || skipping_private) ? -1 :
      linear_search(methods, name, signature);
    assert(-1 == index, "binary search should have found entry %d", index);
#endif
  }
  return -1;
}


    Background

    Understanding the JVM's thread-handling logic.

    JNI

    JNI works much like a PHP extension: a symbol is bound to a native function, as the sketch below shows.
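
    A minimal sketch of the Java side of such a binding (the class and library names are made up for illustration). The JVM resolves the declared symbol either through the Java_<class>_<method> naming convention or through an explicit RegisterNatives call; java.lang.Thread#start0 is a native method bound this way, and its implementation is JVM_StartThread, which is exactly what the backtrace below shows.

    public class NativeDemo {
        static {
            System.loadLibrary("demo"); // expects libdemo.so on java.library.path
        }

        // The declaration is only a symbol; the implementation lives in native code,
        // e.g. a C function named Java_NativeDemo_nativeHello.
        public static native void nativeHello();

        public static void main(String[] args) {
            nativeHello();
        }
    }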


    Stack trace

    The backtrace when a Java thread is started:

    (gdb) bt
    #0 __pthread_create_2_1 (newthread=0x7ffff5aa5458, attr=0x7ffff5aa54a0, start_routine=0x7ffff6c0d1aa <thread_native_entry(Thread*)>, arg=0x7ffff02e7810) at pthread_create.c:625
    #1 0x00007ffff6c0d813 in os::create_thread (thread=0x7ffff02e7810, thr_type=os::java_thread, req_stack_size=0) at /home/ubuntu/daixiao/jdk/src/hotspot/os/linux/os_linux.cpp:867
    #2 0x00007ffff6ee1eb7 in JavaThread::JavaThread (this=0x7ffff02e7810, entry_point=0x7ffff6837419 <thread_entry(JavaThread*, JavaThread*)>, stack_sz=0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/thread.cpp:1195
    #3 0x00007ffff68375e4 in JVM_StartThread (env=0x7ffff0028c38, jthread=0x7ffff5aa5760) at /home/ubuntu/daixiao/jdk/src/hotspot/share/prims/jvm.cpp:2890
    #4 0x00007fffe100f68b in ?? ()
    #5 0x00007ffff5aa56e0 in ?? ()
    #6 0x00007ffff5aa56f8 in ?? ()
    #7 0x0000000000000000 in ?? ()

    doInvoke:92, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
    invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
    invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
    invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
    invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
    doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
    invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
    invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
    testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
    testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
    testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
    evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
    evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
    runLeaf:366, ParentRunner (org.junit.runners)
    runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:128, Suite (org.junit.runners)
    runChild:27, Suite (org.junit.runners)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

    Network I/O

    send:157, NettyChannel (org.apache.dubbo.remoting.transport.netty4)
    send:181, AbstractClient (org.apache.dubbo.remoting.transport)
    send:53, AbstractPeer (org.apache.dubbo.remoting.transport)
    request:137, HeaderExchangeChannel (org.apache.dubbo.remoting.exchange.support.header)
    request:95, HeaderExchangeClient (org.apache.dubbo.remoting.exchange.support.header)
    request:93, ReferenceCountExchangeClient (org.apache.dubbo.rpc.protocol.dubbo)
    doInvoke:108, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
    invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
    invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
    invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
    invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
    doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
    invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
    invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
    testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
    testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
    testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
    evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
    evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
    runLeaf:366, ParentRunner (org.junit.runners)
    runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)
    final WriteTask task = WriteTask.newInstance(next, m, promise, flush); // from io.netty.channel.AbstractChannelHandlerContext#write; this task is what runs on the event loop in the async stack trace below

    Stack trace

    run:1067, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
    safeExecute$$$capture:164, AbstractEventExecutor (io.netty.util.concurrent)
    safeExecute:-1, AbstractEventExecutor (io.netty.util.concurrent)
    - Async stack trace
    addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:828, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:818, SingleThreadEventExecutor (io.netty.util.concurrent)
    safeExecute:989, AbstractChannelHandlerContext (io.netty.channel)
    write:796, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:758, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:808, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
    writeAndFlush:306, AbstractChannel (io.netty.channel)
    send:162, NettyChannel (org.apache.dubbo.remoting.transport.netty4)
    send:181, AbstractClient (org.apache.dubbo.remoting.transport)
    send:53, AbstractPeer (org.apache.dubbo.remoting.transport)
    request:137, HeaderExchangeChannel (org.apache.dubbo.remoting.exchange.support.header)
    request:95, HeaderExchangeClient (org.apache.dubbo.remoting.exchange.support.header)
    request:93, ReferenceCountExchangeClient (org.apache.dubbo.rpc.protocol.dubbo)
    doInvoke:108, DubboInvoker (org.apache.dubbo.rpc.protocol.dubbo)
    invoke:173, AbstractInvoker (org.apache.dubbo.rpc.protocol)
    invoke:52, AsyncToSyncInvoker (org.apache.dubbo.rpc.protocol)
    invoke:78, ListenerInvokerWrapper (org.apache.dubbo.rpc.listener)
    invoke:91, MonitorFilter (org.apache.dubbo.monitor.support)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:52, FutureFilter (org.apache.dubbo.rpc.protocol.dubbo.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:69, ConsumerContextFilter (org.apache.dubbo.rpc.filter)
    invoke:61, FilterNode (org.apache.dubbo.rpc.protocol)
    invoke:56, InvokerWrapper (org.apache.dubbo.rpc.protocol)
    doInvoke:79, FailoverClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    invoke:265, AbstractClusterInvoker (org.apache.dubbo.rpc.cluster.support)
    intercept:47, ClusterInterceptor (org.apache.dubbo.rpc.cluster.interceptor)
    invoke:92, AbstractCluster$InterceptorInvokerNode (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:93, MockClusterInvoker (org.apache.dubbo.rpc.cluster.support.wrapper)
    invoke:170, MigrationInvoker (org.apache.dubbo.registry.client.migration)
    invoke:96, InvokerInvocationHandler (org.apache.dubbo.rpc.proxy)
    testRpc:-1, proxy1 (org.apache.dubbo.common.bytecode)
    testRpc:30, ThirdDubboManagerImpl (com.patpat.mms.mdp.biz.engine.dependencies.api.impl)
    testRpc:41, PushTest (com.patpat.mms.mdp.biz.engine.rest)
    invoke0:-2, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    runReflectiveCall:59, FrameworkMethod$1 (org.junit.runners.model)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    invokeExplosively:56, FrameworkMethod (org.junit.runners.model)
    evaluate:17, InvokeMethod (org.junit.internal.runners.statements)
    evaluate:74, RunBeforeTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, RunAfterTestExecutionCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:75, RunBeforeTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:86, RunAfterTestMethodCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:84, SpringRepeat (org.springframework.test.context.junit4.statements)
    runLeaf:366, ParentRunner (org.junit.runners)
    runChild:251, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

    A ClassCastException is thrown if a cast is found at run time to be impermissible.
    Some casts result in an error at compile time. Some casts can be proven, at compile time,
    always to be correct at run time. For example, it is always correct to convert a value of a
    class type to the type of its superclass; such a cast should require no special action at run
    time. Finally, some casts cannot be proven to be either always correct or always incorrect
    at compile time. Such casts require a test at run time.


    Introduction to grammars

    A grammar consists of four parts:

    • the set of terminal symbols
    • the set of non-terminal symbols
    • P: the set of productions
    • S: the start production

    LR(0)

    right: terminals and non-terminals
    left: a non-terminal
    production: a left side and a right side
    configuration: a production with a dot (marker)
    successor: a set of configurations

    Building the configuration sets

    The problem is how to build the configuration sets.

    A configuration set is the combination of two sets, the basis set and the closure set:

    • basis set
    • closure set

        The basis set consists of all configurations in S having a marker before an s, but with the
        marker moved to follow the s;

    closure set: { A -> .w | A -> w is a production }

    The LR(0) state machine

    What makes LR(0) special is its state machine:

    • reduce state: its transitions amount to (one completed configuration plus zero or one non-terminal)
    • read state: every one of its transitions is on a terminal

    The LR(0) algorithm

    The LR(0) description needs attention to the following:

    • stack: the stack stores two kinds of entries, states and grammar symbols (the union of terminals and non-terminals); a minimal driver sketch follows below
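
    To make the stack discipline concrete, here is a minimal LR(0) driver sketch in Java for the toy grammar S -> ( S ) | x. The state numbering and the hand-built shift/goto tables are assumptions made only for this illustration; a real generator would derive them from the configuration sets described above.

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class Lr0Sketch {
        // Hand-built LR(0) tables for the toy grammar S -> ( S ) | x (illustration only).
        public static boolean parse(String input) {
            Deque<Integer> states = new ArrayDeque<>();     // state half of the stack
            Deque<Character> symbols = new ArrayDeque<>();  // grammar-symbol half of the stack
            states.push(0);
            String w = input + "$";                         // end-of-input marker
            int i = 0;
            while (true) {
                int s = states.peek();
                char a = w.charAt(i);
                switch (s) {
                    case 0:
                    case 2: // read states: every transition is on a terminal
                        if (a == '(')      { symbols.push(a); states.push(2); i++; }
                        else if (a == 'x') { symbols.push(a); states.push(3); i++; }
                        else return false;
                        break;
                    case 3: // reduce state: S -> x
                        reduce(states, symbols, 1);
                        break;
                    case 4: // read state: expects ')'
                        if (a == ')') { symbols.push(a); states.push(5); i++; }
                        else return false;
                        break;
                    case 5: // reduce state: S -> ( S )
                        reduce(states, symbols, 3);
                        break;
                    case 1: // S' -> S : accept once the input is exhausted
                        return a == '$';
                    default:
                        return false;
                }
            }
        }

        // Pop |rhs| symbols (and as many states), push the non-terminal S,
        // then take the goto transition from the state that is now exposed.
        private static void reduce(Deque<Integer> states, Deque<Character> symbols, int rhsLen) {
            for (int k = 0; k < rhsLen; k++) { states.pop(); symbols.pop(); }
            int exposed = states.peek();
            symbols.push('S');
            states.push(exposed == 0 ? 1 : 4); // goto(0, S) = 1, goto(2, S) = 4
        }

        public static void main(String[] args) {
            System.out.println(parse("((x))")); // true
            System.out.println(parse("(x"));    // false
        }
    }

    In this sketch states 0, 2 and 4 are read states (their transitions are on terminals) and states 3 and 5 are reduce states, matching the classification above.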

    Conflicts

    Shift-reduce conflicts

    • Cause: there are productions P1 and P2 such that the right-hand side of P1 is a prefix of the right-hand side of P2.

    Reduce-reduce conflicts:

    • Cause: there are productions P1 and P2 whose right-hand sides share a common suffix.

    How to resolve conflicts

    Resolving shift-reduce conflicts:
    FOLLOW(P) must have no intersection with the symbols that can be shifted.

    Resolving reduce-reduce conflicts:
    the FIRST sets FIRST(t) must have no intersection.

    To resolve such conflicts SLR(1) was introduced; it improves on LR(0) by handling the conflicts that LR(0) cannot resolve.


    SLR(1)


    Preface

    Since a certain version the JDK build is bootstrapped, so you need an existing JDK before you can build the JDK.
    The main reference is https://openjdk.java.net/groups/build/doc/building.html

    Download a JDK
    Bytecode

    src\hotspot\share\interpreter\bytecodes.cpp


    BytecodeInterpreter::run

    (gdb) bt
    #0 TemplateTable::if_icmp (cc=4294967295) at /home/ubuntu/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2381
    #1 0x00007ffff70a9519 in Template::generate (this=0x7ffff7d60be0 <TemplateTable::_template_table+5088>, masm=0x7ffff0019760) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateTable.cpp:63
    #2 0x00007ffff709bde8 in TemplateInterpreterGenerator::generate_and_dispatch (this=0x7ffff5c5da40, t=0x7ffff7d60be0 <TemplateTable::_template_table+5088>, tos_out=vtos)
    at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:392
    #3 0x00007ffff709b934 in TemplateInterpreterGenerator::set_short_entry_points (this=0x7ffff5c5da40, t=0x7ffff7d60be0 <TemplateTable::_template_table+5088>,
    bep=@0x7ffff5c5d398: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, cep=@0x7ffff5c5d3a0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
    sep=@0x7ffff5c5d3a8: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, aep=@0x7ffff5c5d3b0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
    iep=@0x7ffff5c5d3b8: 0x7fffe1028807 "PSQRH\213M\330H\205\311\017\204", <incomplete sequence \312>, lep=@0x7ffff5c5d3c0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
    fep=@0x7ffff5c5d3c8: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>, dep=@0x7ffff5c5d3d0: 0x7fffe1008a37 "H\211d$\330H\201", <incomplete sequence \354\200>,
    vep=@0x7ffff5c5d3d8: 0x7fffe1028800 "\213\004$H\203\304\bPSQRH\213M\330H\205\311\017\204", <incomplete sequence \312>) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:356
    #4 0x00007ffff709b46c in TemplateInterpreterGenerator::set_entry_points (this=0x7ffff5c5da40, code=Bytecodes::_if_icmpeq) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:325
    #5 0x00007ffff709b06d in TemplateInterpreterGenerator::set_entry_points_for_all_bytes (this=0x7ffff5c5da40) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:281
    #6 0x00007ffff709ac13 in TemplateInterpreterGenerator::generate_all (this=0x7ffff5c5da40) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:234
    #7 0x00007ffff70993cb in TemplateInterpreterGenerator::TemplateInterpreterGenerator (this=0x7ffff5c5da40, _code=0x7ffff00a28b0) at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:42
    #8 0x00007ffff7097e5b in TemplateInterpreter::initialize () at /home/ubuntu/jdk/src/hotspot/share/interpreter/templateInterpreter.cpp:56
    #9 0x00007ffff69e06a2 in interpreter_init () at /home/ubuntu/jdk/src/hotspot/share/interpreter/interpreter.cpp:116
    #10 0x00007ffff69bbe73 in init_globals () at /home/ubuntu/jdk/src/hotspot/share/runtime/init.cpp:119
    #11 0x00007ffff70d59b7 in Threads::create_vm (args=0x7ffff5c5de20, canTryAgain=0x7ffff5c5dd2b) at /home/ubuntu/jdk/src/hotspot/share/runtime/thread.cpp:3728
    #12 0x00007ffff6adcd5d in JNI_CreateJavaVM_inner (vm=0x7ffff5c5de78, penv=0x7ffff5c5de80, args=0x7ffff5c5de20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:3945
    #13 0x00007ffff6add0b1 in JNI_CreateJavaVM (vm=0x7ffff5c5de78, penv=0x7ffff5c5de80, args=0x7ffff5c5de20) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:4036
    #14 0x00007ffff7fba88c in InitializeJVM (pvm=0x7ffff5c5de78, penv=0x7ffff5c5de80, ifn=0x7ffff5c5ded0) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:1527
    #15 0x00007ffff7fb7447 in JavaMain (_args=0x7fffffffb040) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:414
    #16 0x00007ffff7d7a609 in start_thread (arg=<optimized out>) at pthread_create.c:477
    #17 0x00007ffff7ed8293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95

    Class loading

    Thread 2 "java" hit Breakpoint 3, SystemDictionary::load_instance_class (class_name=0x7fffcc2d90f0, class_loader=..., __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1386
    1386 InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
    (gdb) bt
    #0 SystemDictionary::load_instance_class (class_name=0x7fffcc2d90f0, class_loader=..., __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1386
    #1 0x00007ffff61bf8ab in SystemDictionary::resolve_instance_class_or_null (name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:854
    #2 0x00007ffff61bdcf8 in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:272
    #3 0x00007ffff61bdb5e in SystemDictionary::resolve_or_null (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:255
    #4 0x00007ffff61bd7d1 in SystemDictionary::resolve_or_fail (class_name=0x7fffcc2d90f0, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:203
    #5 0x00007ffff61bdae8 in SystemDictionary::resolve_or_fail (class_name=0x7fffcc2d90f0, throw_error=true, __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:245
    #6 0x00007ffff61c3490 in SystemDictionary::resolve_wk_klass (id=SystemDictionary::Object_klass_knum, __the_thread__=0x7ffff001b800)
    at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1938
    #7 0x00007ffff61c35c5 in SystemDictionary::resolve_wk_klasses_until (limit_id=SystemDictionary::Cloneable_klass_knum, start_id=@0x7ffff7fbc974: SystemDictionary::Object_klass_knum,
    __the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1948
    #8 0x00007ffff5c5c46d in SystemDictionary::resolve_wk_klasses_through (end_id=SystemDictionary::Class_klass_knum, start_id=@0x7ffff7fbc974: SystemDictionary::Object_klass_knum,
    __the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.hpp:391
    #9 0x00007ffff61c37a3 in SystemDictionary::resolve_well_known_classes (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1991
    #10 0x00007ffff61c32d8 in SystemDictionary::initialize (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/classfile/systemDictionary.cpp:1898
    #11 0x00007ffff623b65c in Universe::genesis (__the_thread__=0x7ffff001b800) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/memory/universe.cpp:329
    #12 0x00007ffff623dd21 in universe2_init () at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/memory/universe.cpp:953
    #13 0x00007ffff5a575e7 in init_globals () at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/runtime/init.cpp:125
    #14 0x00007ffff620ecbe in Threads::create_vm (args=0x7ffff7fbce20, canTryAgain=0x7ffff7fbcd2b) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/runtime/thread.cpp:3731
    #15 0x00007ffff5b6b598 in JNI_CreateJavaVM_inner (vm=0x7ffff7fbce78, penv=0x7ffff7fbce80, args=0x7ffff7fbce20) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/prims/jni.cpp:3935
    #16 0x00007ffff5b6b8c2 in JNI_CreateJavaVM (vm=0x7ffff7fbce78, penv=0x7ffff7fbce80, args=0x7ffff7fbce20) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/hotspot/share/prims/jni.cpp:4021
    #17 0x00007ffff7bc6601 in InitializeJVM (pvm=0x7ffff7fbce78, penv=0x7ffff7fbce80, ifn=0x7ffff7fbced0) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/java.base/share/native/libjli/java.c:1529
    #18 0x00007ffff7bc320c in JavaMain (_args=0x7fffffffaa30) at /home/dinosaur/jdk12/jdk-jdk-12-25/src/java.base/share/native/libjli/java.c:414
    #19 0x00007ffff71956db in start_thread (arg=0x7ffff7fbd700) at pthread_create.c:463
    #20 0x00007ffff78ef61f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95


    (gdb) p class_name->_body@10
    $14 = {"ja", "va", "/l", "an", "g/", "Ob", "je", "ct", "\361", <incomplete sequence \361>, "\377\377"}


    Parsing the class file stream

    ClassFileParser::parse_stream

    find_transitive_override

    Overriding

    find_transitive_override
    update_inherited_vtable

    Signatures

    Method::name_and_sig_as_C_string


    1 Installation

    git clone https://github.com/RoaringBitmap/CRoaring.git
    cd CRoaring/
    mkdir -p build && cd build
    cmake -DCMAKE_BUILD_TYPE=Debug ..
    make -j4

    That is all it takes.


    Beyond a certain size, a bitmap takes less memory than an array; this property lets us compress data storage into a bitmap.

    Background

    When we have an array of numbers, there are several ways to represent them.

    array = [1, 2, 3, 5, 7]     (1)

    Option 1: use an array directly

    Assume each number is a UInt32, i.e. a 4-byte integer.

    Then example (1) above takes 5 * 4 = 20 bytes. The more data we store, the more memory we need: memory usage is linear.

    memory = array.size() * 4

    Advantages:

    • You can store as much data as you have memory for.

    Disadvantages:

    • Memory usage grows linearly with the data.

    Option 2: use a 4-byte bitmap directly

    bitmap:
    uint32_t num = 0;   /* 4-byte bitmap (uint32_t from <stdint.h>) */
    num |= 1u << 1;     /* mark value 1 as present */
    num |= 1u << 2;
    num |= 1u << 3;
    num |= 1u << 5;
    num |= 1u << 7;
    How many numbers can it hold?
    4 * 8 = 32, so it can represent 32 distinct values.

    Advantages:

    • Memory usage is O(1); it does not grow as the data grows.

    Disadvantages:

    • A 4-byte bitmap can describe at most 32 distinct unsigned integer values.

    Roaring bitmap

    Going further, suppose we want to be able to store numbers from the full 2^32 range:

    • Using only a bitmap: we would need 2^32 / 8 = 2^29 bytes = 512 MB.
    • Using only an array: we would need 4 * array.length bytes.

    The difference between the bitmap and the array: the bitmap occupies a fixed amount of memory, while the array's memory grows with its length. In the example above (storing 4-byte integers), the bitmap always occupies 512 MB, whereas the array grows dynamically. When the set is small an array is the better choice; when it is large a bitmap is better.


    Core formula: number_length * n * 8 = 2^n
    Here number_length is the byte length of a single number: to store uint32 values, number_length = 4; to store uint64 values, number_length = 8.

    The solution of 4 * n * 8 = 2^n is 16.

    So a 16-byte union { bitmap, array } is used: when the number of elements is below 16 they are stored as an array, and once there are 16 or more a bitmap is used (a sketch of this idea follows).
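
    A hypothetical Java sketch of this union idea (this is not CRoaring's implementation; the 16-element threshold follows the derivation above, whereas the real Roaring format partitions values into 16-bit chunks and switches from an array container to a bitmap container at 4096 entries):

    import java.util.Arrays;

    // Stores values in the range [0, 65535] either as a sorted array (while small)
    // or as a 2^16-bit bitmap (once it reaches THRESHOLD entries).
    final class SmallSetSketch {
        private static final int THRESHOLD = 16;  // threshold taken from the text above
        private int[] array = new int[0];         // sorted array representation
        private long[] bitmap;                    // 1024 longs * 64 bits = 65536 bits

        void add(int v) {
            if (bitmap != null) { bitmap[v >>> 6] |= 1L << (v & 63); return; }
            if (contains(v)) return;
            array = Arrays.copyOf(array, array.length + 1);
            array[array.length - 1] = v;
            Arrays.sort(array);
            if (array.length >= THRESHOLD) {      // convert array -> bitmap
                bitmap = new long[1 << 10];
                for (int x : array) bitmap[x >>> 6] |= 1L << (x & 63);
                array = null;
            }
        }

        boolean contains(int v) {
            if (bitmap != null) return (bitmap[v >>> 6] & (1L << (v & 63))) != 0;
            return Arrays.binarySearch(array, v) >= 0;
        }
    }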

    Operations on roaring bitmap containers

    See the relevant code:
    CRoaring/include/roaring/containers/containers.h

    A lease is a contract that gives its holder specified rights
    over property for a limited period of time. In the context
    of caching, a lease grants to its holder control over writes
    to the covered datum during the term of the lease, such that
    the server must obtain the approval of the leaseholder before the datum may be written. When a leaseholder grants
    approval for a write, it invalidates its local copy of the datum.

    In short: a lease is a contract under which the holder has specific rights for a limited time. In the caching scenario, a lease gives its holder control over writes during the lease term, so the server must obtain the leaseholder's approval before the datum can be written.

    Related reading

    • Leases: An Efficient Fault-Tolerant Mechanism for Distributed File Cache Consistency
    rabbitmq-server/deps/rabbit/src/rabbit_msg_store.erl

    %% Message store is responsible for storing messages
    %% on disk and loading them back. The store handles both
    %% persistent messages and transient ones (when a node
    %% is under RAM pressure and needs to page messages out
    %% to disk). The store is responsible for locating messages
    %% on disk and maintaining an index.
    The message store is responsible for writing messages to disk and loading them back. It handles both persistent messages and transient ones (when memory pressure is high, messages are paged out to disk). The store locates messages on disk and maintains an index.

    %% There are two message stores per node: one for transient
    %% and one for persistent messages.
    Each node has two message stores:
    one for transient messages and one for persistent messages.
    %%
    %% Queue processes interact with the stores via clients.
    Queue processes interact with the stores via clients.
    %% The components:
    %%
    %% Index: this is a mapping from MsgId to #msg_location{}.
    %% By default, it's in ETS, but other implementations can
    %% be used.
    %% FileSummary: this maps File to #file_summary{} and is stored
    %% in ETS.
    It consists of two components: the index (MsgId -> #msg_location{}) and the file summary (File -> #file_summary{}, stored in ETS).

    %% The basic idea is that messages are appended to the current file up
    %% until that file becomes too big (> file_size_limit). At that point,
    %% the file is closed and a new file is created on the _right_ of the
    %% old file which is used for new messages. Files are named
    %% numerically ascending, thus the file with the lowest name is the
    %% eldest file.
    The basic idea is to append messages to the current file until it becomes too large. At that point
    the file is closed and a new file is created to the right of the old one for new messages. Files are
    named in ascending numeric order, so the file with the smallest name is the oldest.
    %% We need to keep track of which messages are in which files (this is
    %% the index); how much useful data is in each file and which files
    %% are on the left and right of each other. This is the purpose of the
    %% file summary ETS table.
    We need to keep track of which messages are in which files (that is the job of the index), how much
    useful data each file holds, and how the files are ordered relative to each other; that is the purpose of the file summary ETS table.
    %% As messages are removed from files, holes appear in these
    %% files. The field ValidTotalSize contains the total amount of useful
    %% data left in the file. This is needed for garbage collection.
    As messages are removed from files, holes appear in them. The ValidTotalSize field records the total
    amount of useful data left in a file, which is needed for garbage collection.
    %% When we discover that a file is now empty, we delete it. When we
    %% discover that it can be combined with the useful data in either its
    %% left or right neighbour, and overall, across all the files, we have
    %% ((the amount of garbage) / (the sum of all file sizes)) >
    %% ?GARBAGE_FRACTION, we start a garbage collection run concurrently,
    %% which will compact the two files together.
    When a file is found to be empty, it is deleted. When a file can be combined with the useful data in
    its left or right neighbour, and overall the ratio of garbage to the total size of all files exceeds
    ?GARBAGE_FRACTION, a concurrent garbage-collection run starts, which compacts the two files together.

    %% This keeps disk
    %% utilisation high and aids performance. We deliberately do this
    %% lazily in order to prevent doing GC on files which are soon to be
    %% emptied (and hence deleted).

    %% Given the compaction between two files, the left file (i.e. elder
    %% file) is considered the ultimate destination for the good data in
    %% the right file. If necessary, the good data in the left file which
    %% is fragmented throughout the file is written out to a temporary
    %% file, then read back in to form a contiguous chunk of good data at
    %% the start of the left file. Thus the left file is garbage collected
    %% and compacted. Then the good data from the right file is copied
    %% onto the end of the left file. Index and file summary tables are
    %% updated.
    %%
    %% On non-clean startup, we scan the files we discover, dealing with
    %% the possibilities of a crash having occurred during a compaction
    %% (this consists of tidyup - the compaction is deliberately designed
    %% such that data is duplicated on disk rather than risking it being
    %% lost), and rebuild the file summary and index ETS table.
    %%
    %% So, with this design, messages move to the left. Eventually, they
    %% should end up in a contiguous block on the left and are then never
    %% rewritten. But this isn't quite the case. If in a file there is one
    %% message that is being ignored, for some reason, and messages in the
    %% file to the right and in the current block are being read all the
    %% time then it will repeatedly be the case that the good data from
    %% both files can be combined and will be written out to a new
    %% file. Whenever this happens, our shunned message will be rewritten.
    %%
    %% So, provided that we combine messages in the right order,
    %% (i.e. left file, bottom to top, right file, bottom to top),
    %% eventually our shunned message will end up at the bottom of the
    %% left file. The compaction/combining algorithm is smart enough to
    %% read in good data from the left file that is scattered throughout
    %% (i.e. C and D in the below diagram), then truncate the file to just
    %% above B (i.e. truncate to the limit of the good contiguous region
    %% at the start of the file), then write C and D on top and then write
    %% E, F and G from the right file on top. Thus contiguous blocks of
    %% good data at the bottom of files are not rewritten.
    %%
    %% +-------+ +-------+ +-------+
    %% | X | | G | | G |
    %% +-------+ +-------+ +-------+
    %% | D | | X | | F |
    %% +-------+ +-------+ +-------+
    %% | X | | X | | E |
    %% +-------+ +-------+ +-------+
    %% | C | | F | ===> | D |
    %% +-------+ +-------+ +-------+
    %% | X | | X | | C |
    %% +-------+ +-------+ +-------+
    %% | B | | X | | B |
    %% +-------+ +-------+ +-------+
    %% | A | | E | | A |
    %% +-------+ +-------+ +-------+
    %% left right left
    %%
    %% From this reasoning, we do have a bound on the number of times the
    %% message is rewritten. From when it is inserted, there can be no
    %% files inserted between it and the head of the queue, and the worst
    %% case is that every time it is rewritten, it moves one position lower
    %% in the file (for it to stay at the same position requires that
    %% there are no holes beneath it, which means truncate would be used
    %% and so it would not be rewritten at all). Thus this seems to
    %% suggest the limit is the number of messages ahead of it in the
    %% queue, though it's likely that that's pessimistic, given the
    %% requirements for compaction/combination of files.
    %%
    %% The other property that we have is the bound on the lowest
    %% utilisation, which should be 50% - worst case is that all files are
    %% fractionally over half full and can't be combined (equivalent is
    %% alternating full files and files with only one tiny message in
    %% them).
    %%
    %% Messages are reference-counted. When a message with the same msg id
    %% is written several times we only store it once, and only remove it
    %% from the store when it has been removed the same number of times.
    %%
    %% The reference counts do not persist. Therefore the initialisation
    %% function must be provided with a generator that produces ref count
    %% deltas for all recovered messages. This is only used on startup
    %% when the shutdown was non-clean.
    %%
    %% Read messages with a reference count greater than one are entered
    %% into a message cache. The purpose of the cache is not especially
    %% performance, though it can help there too, but prevention of memory
    %% explosion. It ensures that as messages with a high reference count
    %% are read from several processes they are read back as the same
    %% binary object rather than multiples of identical binary
    %% objects.
    %%
    %% Reads can be performed directly by clients without calling to the
    %% server. This is safe because multiple file handles can be used to
    %% read files. However, locking is used by the concurrent GC to make
    %% sure that reads are not attempted from files which are in the
    %% process of being garbage collected.
    %%
    %% When a message is removed, its reference count is decremented. Even
    %% if the reference count becomes 0, its entry is not removed. This is
    %% because in the event of the same message being sent to several
    %% different queues, there is the possibility of one queue writing and
    %% removing the message before other queues write it at all. Thus
    %% accommodating 0-reference counts allows us to avoid unnecessary
    %% writes here. Of course, there are complications: the file to which
    %% the message has already been written could be locked pending
    %% deletion or GC, which means we have to rewrite the message as the
    %% original copy will now be lost.
    %%
    %% The server automatically defers reads, removes and contains calls
    %% that occur which refer to files which are currently being
    %% GC'd. Contains calls are only deferred in order to ensure they do
    %% not overtake removes.
    %%
    %% The current file to which messages are being written has a
    %% write-back cache. This is written to immediately by clients and can
    %% be read from by clients too. This means that there are only ever
    %% writes made to the current file, thus eliminating delays due to
    %% flushing write buffers in order to be able to safely read from the
    %% current file. The one exception to this is that on start up, the
    %% cache is not populated with msgs found in the current file, and
    %% thus in this case only, reads may have to come from the file
    %% itself. The effect of this is that even if the msg_store process is
    %% heavily overloaded, clients can still write and read messages with
    %% very low latency and not block at all.
    %%
    %% Clients of the msg_store are required to register before using the
    %% msg_store. This provides them with the necessary client-side state
    %% to allow them to directly access the various caches and files. When
    %% they terminate, they should deregister. They can do this by calling
    %% either client_terminate/1 or client_delete_and_terminate/1. The
    %% differences are: (a) client_terminate is synchronous. As a result,
    %% if the msg_store is badly overloaded and has lots of in-flight
    %% writes and removes to process, this will take some time to
    %% return. However, once it does return, you can be sure that all the
    %% actions you've issued to the msg_store have been processed. (b) Not
    %% only is client_delete_and_terminate/1 asynchronous, but it also
    %% permits writes and subsequent removes from the current
    %% (terminating) client which are still in flight to be safely
    %% ignored. Thus from the point of view of the msg_store itself, and
    %% all from the same client:
    %%
    %% (T) = termination; (WN) = write of msg N; (RN) = remove of msg N
    %% --> W1, W2, W1, R1, T, W3, R2, W2, R1, R2, R3, W4 -->
    %%
    %% The client obviously sent T after all the other messages (up to
    %% W4), but because the msg_store prioritises messages, the T can be
    %% promoted and thus received early.
    %%
    %% Thus at the point of the msg_store receiving T, we have messages 1
    %% and 2 with a refcount of 1. After T, W3 will be ignored because
    %% it's an unknown message, as will R3, and W4. W2, R1 and R2 won't be
    %% ignored because the messages that they refer to were already known
    %% to the msg_store prior to T. However, it can be a little more
    %% complex: after the first R2, the refcount of msg 2 is 0. At that
    %% point, if a GC occurs or file deletion, msg 2 could vanish, which
    %% would then mean that the subsequent W2 and R2 are then ignored.
    %%
    %% The use case then for client_delete_and_terminate/1 is if the
    %% client wishes to remove everything it's written to the msg_store:
    %% it issues removes for all messages it's written and not removed,
    %% and then calls client_delete_and_terminate/1. At that point, any
    %% in-flight writes (and subsequent removes) can be ignored, but
    %% removes and writes for messages the msg_store already knows about
    %% will continue to be processed normally (which will normally just
    %% involve modifying the reference count, which is fast). Thus we save
    %% disk bandwidth for writes which are going to be immediately removed
    %% again by the the terminating client.
    %%
    %% We use a separate set to keep track of the dying clients in order
    %% to keep that set, which is inspected on every write and remove, as
    %% small as possible. Inspecting the set of all clients would degrade
    %% performance with many healthy clients and few, if any, dying
    %% clients, which is the typical case.
    %%
    %% Client termination messages are stored in a separate ets index to
    %% avoid filling primary message store index and message files with
    %% client termination messages.
    %%
    %% When the msg_store has a backlog (i.e. it has unprocessed messages
    %% in its mailbox / gen_server priority queue), a further optimisation
    %% opportunity arises: we can eliminate pairs of 'write' and 'remove'
    %% from the same client for the same message. A typical occurrence of
    %% these is when an empty durable queue delivers persistent messages
    %% to ack'ing consumers. The queue will asynchronously ask the
    %% msg_store to 'write' such messages, and when they are acknowledged
    %% it will issue a 'remove'. That 'remove' may be issued before the
    %% msg_store has processed the 'write'. There is then no point going
    %% ahead with the processing of that 'write'.
    %%
    %% To detect this situation a 'flying_ets' table is shared between the
    %% clients and the server. The table is keyed on the combination of
    %% client (reference) and msg id, and the value represents an
    %% integration of all the writes and removes currently "in flight" for
    %% that message between the client and server - '+1' means all the
    %% writes/removes add up to a single 'write', '-1' to a 'remove', and
    %% '0' to nothing. (NB: the integration can never add up to more than
    %% one 'write' or 'read' since clients must not write/remove a message
    %% more than once without first removing/writing it).
    %%
    %% Maintaining this table poses two challenges: 1) both the clients
    %% and the server access and update the table, which causes
    %% concurrency issues, 2) we must ensure that entries do not stay in
    %% the table forever, since that would constitute a memory leak. We
    %% address the former by carefully modelling all operations as
    %% sequences of atomic actions that produce valid results in all
    %% possible interleavings. We address the latter by deleting table
    %% entries whenever the server finds a 0-valued entry during the
    %% processing of a write/remove. 0 is essentially equivalent to "no
    %% entry". If, OTOH, the value is non-zero we know there is at least
    %% one other 'write' or 'remove' in flight, so we get an opportunity
    %% later to delete the table entry when processing these.
    %%
    %% There are two further complications. We need to ensure that 1)
    %% eliminated writes still get confirmed, and 2) the write-back cache
    %% doesn't grow unbounded. These are quite straightforward to
    %% address. See the comments in the code.
    %%
    %% For notes on Clean Shutdown and startup, see documentation in
    %% rabbit_variable_queue.

    Why we need heartbeats

    What does TCP rely on to keep a connection alive?

    Sequence numbers and retransmission; but those belong to the transport layer. The application layer cannot tell that the peer has gone away, which is why an application-level heartbeat is needed.

    What is the problem with heartbeats in PHP?

    PHP mostly runs a single-process model, so there is no extra thread that periodically sends a heartbeat frame on the TCP connection. As a result, once a task runs for long enough (about twice the heartbeat interval), the RabbitMQ peer closes the connection.

    So in most scenarios we need to keep our processing time below the heartbeat interval, otherwise we hit "broken pipe" errors. In practice this usually means the heartbeat interval was exceeded and RabbitMQ actively closed the TCP connection.
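
    For contrast, a client library that runs its own I/O threads can keep heartbeats flowing even while application code is busy. A minimal sketch with the RabbitMQ Java client (com.rabbitmq:amqp-client); the host and the 30-second interval are placeholder choices:

    import com.rabbitmq.client.Connection;
    import com.rabbitmq.client.ConnectionFactory;

    public class HeartbeatDemo {
        public static void main(String[] args) throws Exception {
            ConnectionFactory factory = new ConnectionFactory();
            factory.setHost("localhost");                 // placeholder broker address
            factory.setRequestedHeartbeat(30);            // heartbeat interval in seconds
            try (Connection conn = factory.newConnection()) {
                // The client's own threads answer heartbeats, so long-running work here
                // does not by itself cause the broker to drop the connection.
                Thread.sleep(120_000);
            }
        }
    }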


    Lucene has two parts (a minimal end-to-end sketch follows the list):

    • Writing: writes go to the file system.
    • Querying: a query goes through tokenization, scoring/sorting, and top-k extraction to obtain the matching docids, and the docids are then used to look up the corresponding content.
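
    To tie the two parts together, here is a minimal, self-contained sketch (the class name, field name and index path are made up for illustration; it assumes the Lucene core jar already used elsewhere in this post is on the classpath):

    import java.nio.file.Paths;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class WriteAndQuery {
        public static void main(String[] args) throws Exception {
            Directory dir = FSDirectory.open(Paths.get("demo-index")); // placeholder index path

            // Write path: documents go through the IndexWriter into files on disk.
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document doc = new Document();
                doc.add(new TextField("contents", "hello lucene", Field.Store.YES));
                writer.addDocument(doc);
            }

            // Query path: the search yields docids, and the docids are then used
            // to load the stored content back.
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                TopDocs hits = searcher.search(new TermQuery(new Term("contents", "hello")), 10);
                for (ScoreDoc sd : hits.scoreDocs) {
                    System.out.println(sd.doc + " -> " + searcher.doc(sd.doc).get("contents"));
                }
            }
        }
    }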

    VInt

    A VInt is a variable-length, little-endian byte sequence: the high bit of each byte is set to 1 when more bytes follow (so the last byte has its high bit clear). A small sketch of the encoding is shown below.
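
    A minimal Java sketch of this encoding (for illustration; in Lucene itself the logic lives in DataOutput#writeVInt and DataInput#readVInt):

    import java.io.ByteArrayOutputStream;

    final class VIntSketch {
        // Little-endian base-128: emit 7 bits per byte, setting the high bit
        // whenever more bytes follow.
        static byte[] encode(int value) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            while ((value & ~0x7F) != 0) {
                out.write((value & 0x7F) | 0x80);  // continuation bit set
                value >>>= 7;
            }
            out.write(value);                      // last byte: high bit clear
            return out.toByteArray();
        }

        static int decode(byte[] bytes) {
            int value = 0;
            for (int i = 0, shift = 0; i < bytes.length; i++, shift += 7) {
                value |= (bytes[i] & 0x7F) << shift;
                if ((bytes[i] & 0x80) == 0) break; // high bit clear -> last byte
            }
            return value;
        }

        public static void main(String[] args) {
            System.out.println(decode(encode(300))); // prints 300 (encoded as 0xAC 0x02)
        }
    }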

    Relevant code

    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setUseCompoundFile(false); // produce separate files instead of a compound file

    Starting to debug

    ### Debug the Java code

    java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 -cp ./lucene-demo-9.1.0-SNAPSHOT.jar:/home/ubuntu/lucene-9.1.0/lucene/core/build/libs/lucene-core-9.1.0-SNAPSHOT.jar:/home/ubuntu/lucene-9.1.0/lucene/queryparser/build/libs/lucene-queryparser-9.1.0-SNAPSHOT.jar org.apache.lucene.demo.SearchFiles

    ### Attach jdb to the JVM
    jdb -attach 8000 -sourcepath /home/ubuntu/lucene-9.1.0/lucene/demo/src/java/

    Inspecting the .fdt file

    hexdump -C _0.fdt
    00000000 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f |?.l..Lucene90Sto|
    00000010 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74 |redFieldsFastDat|
    00000020 61 00 00 00 01 85 88 12 2b 0c 73 6b 95 30 38 76 |a.......+.sk.08v|
    00000030 c9 0a 2a 52 29 00 00 0a 00 01 00 1c 02 06 03 07 |..*R)...........|
    00000040 07 07 07 07 07 07 07 07 20 00 1a 60 2f 68 6f 6d |........ ..`/hom|
    00000050 65 2f 60 75 62 75 6e 74 75 60 2f 64 6f 63 2f 6d |e/`ubuntu`/doc/m|
    00000060 60 6f 6e 67 6f 2e 74 60 78 74 00 1a 2f 68 60 6f |`ongo.t`xt../h`o|
    00000070 6d 65 2f 75 62 60 75 6e 74 75 2f 64 60 6f 63 2f |me/ub`untu/d`oc/|
    00000080 68 65 6c 60 6c 6f 2e 74 78 74 c0 28 93 e8 00 00 |hel`lo.txt.(....|
    00000090 00 00 00 00 00 00 c8 75 0a 41 |.......u.A|
    0000009a

    writeField

    ubuntu@VM-0-3-ubuntu:~$ jdb -attach 8000 -sourcepath /home/ubuntu/lucene-9.1.0/lucene/demo/src/java/:/home/ubuntu/lucene-9.1.0/lucene/core/src/java/ 
    Set uncaught java.lang.Throwable
    Set deferred uncaught java.lang.Throwable
    Initializing jdb ...
    >
    VM Started: No frames on the current call stack

    main[1] stop in org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField
    Deferring breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField.
    It will be set after the class is loaded.
    main[1] cont
    > Set deferred breakpoint org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField

    Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField(), line=276 bci=0
    276 ++numStoredFieldsInDoc;

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsWriter.writeField (Lucene90CompressingStoredFieldsWriter.java:276)
    [2] org.apache.lucene.index.StoredFieldsConsumer.writeField (StoredFieldsConsumer.java:65)
    [3] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:749)
    [4] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [5] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [6] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [7] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [8] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [9] org.apache.lucene.index.IndexWriter.addDocument (IndexWriter.java:1,469)
    [10] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:271)
    [11] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [12] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [13] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [14] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [15] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [16] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)
    main[1] list
    272
    273 @Override
    274 public void writeField(FieldInfo info, IndexableField field) throws IOException {
    275
    276 => ++numStoredFieldsInDoc;
    277
    278 int bits = 0;
    279 final BytesRef bytes;
    280 final String string;
    281
    main[1] print field
    field = "stored,indexed,omitNorms,indexOptions=DOCS<path:/home/ubuntu/doc/mongo.txt>"
    main[1] print info
    info = "org.apache.lucene.index.FieldInfo@32464a14"


    Tokenization and the inverted index

    main[1] where
    [1] org.apache.lucene.index.IndexingChain$PerField.invert (IndexingChain.java:1,138)
    [2] org.apache.lucene.index.IndexingChain.processField (IndexingChain.java:729)
    [3] org.apache.lucene.index.IndexingChain.processDocument (IndexingChain.java:620)
    [4] org.apache.lucene.index.DocumentsWriterPerThread.updateDocuments (DocumentsWriterPerThread.java:241)
    [5] org.apache.lucene.index.DocumentsWriter.updateDocuments (DocumentsWriter.java:432)
    [6] org.apache.lucene.index.IndexWriter.updateDocuments (IndexWriter.java:1,531)
    [7] org.apache.lucene.index.IndexWriter.updateDocument (IndexWriter.java:1,816)
    [8] org.apache.lucene.demo.IndexFiles.indexDoc (IndexFiles.java:277)
    [9] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:212)
    [10] org.apache.lucene.demo.IndexFiles$1.visitFile (IndexFiles.java:208)
    [11] java.nio.file.Files.walkFileTree (Files.java:2,725)
    [12] java.nio.file.Files.walkFileTree (Files.java:2,797)
    [13] org.apache.lucene.demo.IndexFiles.indexDocs (IndexFiles.java:206)
    [14] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:157)
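To keep the end goal in mind before diving into Lucene's structures, here is a toy inverted index in plain Java (a sketch for intuition only; Lucene's real structures are the pools, FSTs, and block files discussed below): each document is tokenized, and every term maps to the list of docids that contain it.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Toy inverted index: term -> sorted list of docids containing it.
class ToyInvertedIndex {
    private final Map<String, List<Integer>> postings = new TreeMap<>();

    void add(int docId, String text) {
        for (String term : text.toLowerCase().split("\\W+")) {
            if (term.isEmpty()) continue;
            List<Integer> docs = postings.computeIfAbsent(term, t -> new ArrayList<>());
            if (docs.isEmpty() || docs.get(docs.size() - 1) != docId) docs.add(docId); // dedupe per doc
        }
    }

    List<Integer> search(String term) {
        return postings.getOrDefault(term.toLowerCase(), List.of());
    }
}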

How a term is represented in memory

    IntBlockPool intPool,
    ByteBlockPool bytePool,
    ByteBlockPool termBytePool,

In memory, an inverted-index term is described by the structures above. intPool holds a buffer array plus three offset variables:

• a two-dimensional array buffers[][]
• int bufferUpto: the first-level index into buffers[][]; it is typically used as int[] buff = buffers[bufferUpto + offset]
• int intUpto: the overall offset, i.e. the position counted in bytes across all of the buffers
• int intOffset: the offset of the current head buffer

So what do the values stored in buffers[x][y] mean? This 2D buffers array also stores offsets. Offsets into what?

intPool stores offsets into bytePool and termBytePool.
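To make that concrete, here is a minimal, hypothetical Java sketch of the idea (not Lucene's real IntBlockPool/ByteBlockPool API; the block structure is collapsed to a single buffer, so bufferUpto and block rollover are omitted): the term bytes go into a byte pool, and the int pool only records where in the byte pool each term starts.

import java.nio.charset.StandardCharsets;

// Hypothetical sketch: an int pool whose slots are offsets into a byte pool.
class PoolSketch {
    static final int BLOCK_SIZE = 8192;

    byte[] bytePool = new byte[BLOCK_SIZE]; // raw term bytes live here
    int byteUpto = 0;                       // next free byte position

    int[] intPool = new int[BLOCK_SIZE];    // only offsets into bytePool live here
    int intUpto = 0;                        // next free int slot

    /** Append a term's bytes to the byte pool and record its start offset in the int pool. */
    int addTerm(String term) {
        byte[] utf8 = term.getBytes(StandardCharsets.UTF_8);
        System.arraycopy(utf8, 0, bytePool, byteUpto, utf8.length);
        intPool[intUpto] = byteUpto;        // the int pool stores a byte-pool offset
        byteUpto += utf8.length;
        return intUpto++;                   // slot identifying this term
    }

    /** Resolve a term slot back to the byte-pool offset where its bytes begin. */
    int byteOffsetOf(int slot) {
        return intPool[slot];
    }
}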


Writing terms into the .tim file

The terms are written out one by one:

    main[1] where 
    [1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.writeBlock (Lucene90BlockTreeTermsWriter.java:963)
    [2] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.writeBlocks (Lucene90BlockTreeTermsWriter.java:709)
    [3] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter$TermsWriter.finish (Lucene90BlockTreeTermsWriter.java:1,105)
    [4] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter.write (Lucene90BlockTreeTermsWriter.java:370)
    [5] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsWriter.write (PerFieldPostingsFormat.java:171)
    [6] org.apache.lucene.index.FreqProxTermsWriter.flush (FreqProxTermsWriter.java:131)
    [7] org.apache.lucene.index.IndexingChain.flush (IndexingChain.java:300)
    [8] org.apache.lucene.index.DocumentsWriterPerThread.flush (DocumentsWriterPerThread.java:391)
    [9] org.apache.lucene.index.DocumentsWriter.doFlush (DocumentsWriter.java:493)
    [10] org.apache.lucene.index.DocumentsWriter.flushAllThreads (DocumentsWriter.java:672)
    [11] org.apache.lucene.index.IndexWriter.doFlush (IndexWriter.java:4,014)
    [12] org.apache.lucene.index.IndexWriter.flush (IndexWriter.java:3,988)
    [13] org.apache.lucene.index.IndexWriter.shutdown (IndexWriter.java:1,321)
    [14] org.apache.lucene.index.IndexWriter.close (IndexWriter.java:1,361)
    [15] org.apache.lucene.demo.IndexFiles.main (IndexFiles.java:166)


Search

    main[1] where
    [1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector.getLeafCollector (TopScoreDocCollector.java:57)
    [2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:759)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [5] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [7] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [8] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Fetching a term

The term is read from the terms reader:

    main[1] print fieldMap.get(field)
    fieldMap.get(field) = "BlockTreeTerms(seg=_j terms=18,postings=20,positions=25,docs=2)"
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader.terms (Lucene90BlockTreeTermsReader.java:294)
    [2] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.terms (PerFieldPostingsFormat.java:353)
    [3] org.apache.lucene.index.CodecReader.terms (CodecReader.java:114)
    [4] org.apache.lucene.index.Terms.getTerms (Terms.java:41)
    [5] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:115)
    [6] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [7] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [8] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)



Getting the corresponding output via the FST arc

    Breakpoint hit: "thread=main", org.apache.lucene.util.fst.FST.findTargetArc(), line=1,412 bci=0
    1,412 if (labelToMatch == END_LABEL) {

    main[1] where
    [1] org.apache.lucene.util.fst.FST.findTargetArc (FST.java:1,412)
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:511)
    [3] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
    [4] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Opening the .tim file

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader.<init> (Lucene90BlockTreeTermsReader.java:135)
    [2] org.apache.lucene.codecs.lucene90.Lucene90PostingsFormat.fieldsProducer (Lucene90PostingsFormat.java:427)
    [3] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.<init> (PerFieldPostingsFormat.java:329)
    [4] org.apache.lucene.codecs.perfield.PerFieldPostingsFormat.fieldsProducer (PerFieldPostingsFormat.java:391)
    [5] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:118)
    [6] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
    [7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
    [8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
    [10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)


The core routine for assembling the top-k results is mergeAux, a helper that merges the per-shard hits into the final top-k.

    Step completed: "thread=main", org.apache.lucene.search.TopDocs.mergeAux(), line=291 bci=43
    291 for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) {

    main[1] where
    [1] org.apache.lucene.search.TopDocs.mergeAux (TopDocs.java:291)
    [2] org.apache.lucene.search.TopDocs.merge (TopDocs.java:216)
    [3] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:528)
    [4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
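mergeAux walks the shardHits arrays and repeatedly picks the best remaining head. A generic sketch of that kind of top-k merge with a priority queue (simplified; this is not Lucene's TopDocs.mergeAux code, and the Hit record is made up for the example):

import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

// Hypothetical sketch of a k-way top-k merge: each shard's scores are already sorted
// descending; we repeatedly pop the best head across shards until k results are collected.
class TopKMergeSketch {
    record Hit(int shard, int indexInShard, float score) {}

    static List<Hit> merge(float[][] shardScores, int k) {
        PriorityQueue<Hit> pq = new PriorityQueue<>(
            (a, b) -> a.score() != b.score()
                ? Float.compare(b.score(), a.score())          // higher score first
                : Integer.compare(a.shard(), b.shard()));      // tie-break on shard for stability
        for (int s = 0; s < shardScores.length; s++) {
            if (shardScores[s].length > 0) pq.add(new Hit(s, 0, shardScores[s][0]));
        }
        List<Hit> top = new ArrayList<>();
        while (!pq.isEmpty() && top.size() < k) {
            Hit best = pq.poll();
            top.add(best);
            int next = best.indexInShard() + 1;
            if (next < shardScores[best.shard()].length) {
                pq.add(new Hit(best.shard(), next, shardScores[best.shard()][next]));
            }
        }
        return top;
    }
}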


Fetching the document content for a docid

A document is retrieved by its docid. At the application level this is the searcher.doc(hit.doc) call in the demo, sketched below; the stack trace that follows shows where that call lands inside Lucene.
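Roughly what the demo's doPagingSearch does with each hit (a sketch, not the exact demo code; "path" is the stored field the demo indexes):

import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

class PrintHitsSketch {
    // searcher.doc(hit.doc) is the call that ends up in the stored-fields reader shown below.
    static void printHits(IndexSearcher searcher, Query query) throws Exception {
        TopDocs results = searcher.search(query, 10);
        for (ScoreDoc hit : results.scoreDocs) {
            Document doc = searcher.doc(hit.doc);                        // load stored fields by docid
            System.out.println(doc.get("path") + " score=" + hit.score); // "path" is stored by the demo
        }
    }
}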

    main[1] where
    [1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:529)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:594)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
    [4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
    [5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The seek method positions to the document by offset; curBuf inside seek is a java.nio.DirectByteBufferR:

    525    
    526 @Override
    527 public void seek(long pos) throws IOException {
    528 try {
    529 => curBuf.position((int) pos);
    530 } catch (IllegalArgumentException e) {
    531 if (pos < 0) {
    532 throw new IllegalArgumentException("Seeking to negative position: " + this, e);
    533 } else {
    534 throw new EOFException("seek past EOF: " + this);
    main[1] print curBuf
    curBuf = "java.nio.DirectByteBufferR[pos=60 lim=154 cap=154]"

    main[1] list
    168
    169 // NOTE: AIOOBE not EOF if you read too much
    170 @Override
    171 public void readBytes(byte[] b, int offset, int len) {
    172 => System.arraycopy(bytes, pos, b, offset, len);
    173 pos += len;
    174 }
    175 }
    main[1] where
    [1] org.apache.lucene.store.ByteArrayDataInput.readBytes (ByteArrayDataInput.java:172)
    [2] org.apache.lucene.store.DataInput.readString (DataInput.java:265)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
    [4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
    [5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Loading file data through off-heap memory

    Breakpoint hit: "thread=main", org.apache.lucene.store.ByteBufferIndexInput.setCurBuf(), line=83 bci=0
    83 this.curBuf = curBuf;

    main[1] where
    [1] org.apache.lucene.store.ByteBufferIndexInput.setCurBuf (ByteBufferIndexInput.java:83)
    [2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.<init> (ByteBufferIndexInput.java:520)
    [3] org.apache.lucene.store.ByteBufferIndexInput.newInstance (ByteBufferIndexInput.java:60)
    [4] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:238)
    [5] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
    [6] org.apache.lucene.index.SegmentInfos.readCommit (SegmentInfos.java:297)
    [7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:88)
    [8] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [9] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
    [10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [11] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [12] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)


FileChannel's map

The corresponding Java class:

    src\java.base\share\classes\sun\nio\ch\FileChannelImpl.java
    // Creates a new mapping
    private native long map0(int prot, long position, long length, boolean isSync)
    throws IOException;

The corresponding native C implementation:

    src\java.base\unix\native\libnio\ch\FileChannelImpl.c
    JNIEXPORT jlong JNICALL
    Java_sun_nio_ch_FileChannelImpl_map0(JNIEnv *env, jobject this,
    jint prot, jlong off, jlong len, jboolean map_sync)
    {
    void *mapAddress = 0;
    jobject fdo = (*env)->GetObjectField(env, this, chan_fd);
    jint fd = fdval(env, fdo);
    int protections = 0;
    int flags = 0;

    // should never be called with map_sync and prot == PRIVATE
    assert((prot != sun_nio_ch_FileChannelImpl_MAP_PV) || !map_sync);

    if (prot == sun_nio_ch_FileChannelImpl_MAP_RO) {
    protections = PROT_READ;
    flags = MAP_SHARED;
    } else if (prot == sun_nio_ch_FileChannelImpl_MAP_RW) {
    protections = PROT_WRITE | PROT_READ;
    flags = MAP_SHARED;
    } else if (prot == sun_nio_ch_FileChannelImpl_MAP_PV) {
    protections = PROT_WRITE | PROT_READ;
    flags = MAP_PRIVATE;
    }

    // if MAP_SYNC and MAP_SHARED_VALIDATE are not defined then it is
    // best to define them here. This ensures the code compiles on old
    // OS releases which do not provide the relevant headers. If run
    // on the same machine then it will work if the kernel contains
    // the necessary support otherwise mmap should fail with an
    // invalid argument error

    #ifndef MAP_SYNC
    #define MAP_SYNC 0x80000
    #endif
    #ifndef MAP_SHARED_VALIDATE
    #define MAP_SHARED_VALIDATE 0x03
    #endif

    if (map_sync) {
    // ensure
    // 1) this is Linux on AArch64, x86_64, or PPC64 LE
    // 2) the mmap APIs are available at compile time
    #if !defined(LINUX) || ! (defined(aarch64) || (defined(amd64) && defined(_LP64)) || defined(ppc64le))
    // TODO - implement for solaris/AIX/BSD/WINDOWS and for 32 bit
    JNU_ThrowInternalError(env, "should never call map on platform where MAP_SYNC is unimplemented");
    return IOS_THROWN;
    #else
    flags |= MAP_SYNC | MAP_SHARED_VALIDATE;
    #endif
    }

    mapAddress = mmap64(
    0, /* Let OS decide location */
    len, /* Number of bytes to map */
    protections, /* File permissions */
    flags, /* Changes are shared */
    fd, /* File descriptor of mapped file */
    off); /* Offset into file */

    if (mapAddress == MAP_FAILED) {
    if (map_sync && errno == ENOTSUP) {
    JNU_ThrowIOExceptionWithLastError(env, "map with mode MAP_SYNC unsupported");
    return IOS_THROWN;
    }

    if (errno == ENOMEM) {
    JNU_ThrowOutOfMemoryError(env, "Map failed");
    return IOS_THROWN;
    }
    return handle(env, -1, "Map failed");
    }

    return ((jlong) (unsigned long) mapAddress);
    }


Reading file contents from disk through an mmap mapping

Underneath FileChannel, the mapping is created by a native method; on Linux that is mmap64.
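For comparison, mapping a file with the plain java.nio API looks like this (a sketch of the same mechanism MMapDirectory relies on, not Lucene's code; FileChannel.map is what reaches the native map0 / mmap64 shown above):

import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;

public class MmapReadSketch {
    public static void main(String[] args) throws Exception {
        Path path = Path.of(args[0]);
        try (FileChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
            // map the whole file read-only; reads then go straight against the mapped pages
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            byte[] header = new byte[Math.min(16, buf.remaining())];
            buf.get(header);
            System.out.println(Arrays.toString(header));
        }
    }
}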

    main[1] list
    228
    229 /** Creates an IndexInput for the file with the given name. */
    230 @Override
    231 public IndexInput openInput(String name, IOContext context) throws IOException {
    232 => ensureOpen();
    233 ensureCanRead(name);
    234 Path path = directory.resolve(name);
    235 try (FileChannel c = FileChannel.open(path, StandardOpenOption.READ)) {
    236 final String resourceDescription = "MMapIndexInput(path=\"" + path.toString() + "\")";
    237 final boolean useUnmap = getUseUnmap();
    main[1] print name
    name = "_j.fnm"
    main[1] where
    [1] org.apache.lucene.store.MMapDirectory.openInput (MMapDirectory.java:232)
    [2] org.apache.lucene.store.Directory.openChecksumInput (Directory.java:152)
    [3] org.apache.lucene.codecs.lucene90.Lucene90FieldInfosFormat.read (Lucene90FieldInfosFormat.java:124)
    [4] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:111)
    [5] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
    [6] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
    [7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [8] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
    [9] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [11] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

Reading the mmapped data

Where does the buffer obtained from mmap actually get used? The access pattern is the same as ordinary file I/O: seek to an offset, then read bytes.

    lucene\core\src\java\org\apache\lucene\store\ByteBufferIndexInput.java
    @Override
    public final void readBytes(byte[] b, int offset, int len) throws IOException {
    try {
    guard.getBytes(curBuf, b, offset, len);
    } catch (
    @SuppressWarnings("unused")
    BufferUnderflowException e) {
    int curAvail = curBuf.remaining();
    while (len > curAvail) {
    guard.getBytes(curBuf, b, offset, curAvail);
    len -= curAvail;
    offset += curAvail;
    curBufIndex++;
    if (curBufIndex >= buffers.length) {
    throw new EOFException("read past EOF: " + this);
    }
    setCurBuf(buffers[curBufIndex]);
    curBuf.position(0);
    curAvail = curBuf.remaining();
    }
    guard.getBytes(curBuf, b, offset, len);
    } catch (
    @SuppressWarnings("unused")
    NullPointerException npe) {
    throw new AlreadyClosedException("Already closed: " + this);
    }
    }

The actual read from the mapped buffer goes through Unsafe.copyMemory:

    main[1] where
    [1] jdk.internal.misc.Unsafe.copyMemory (Unsafe.java:782)
    [2] java.nio.DirectByteBuffer.get (DirectByteBuffer.java:308)
    [3] org.apache.lucene.store.ByteBufferGuard.getBytes (ByteBufferGuard.java:93)
    [4] org.apache.lucene.store.ByteBufferIndexInput.readBytes (ByteBufferIndexInput.java:114)
    [5] org.apache.lucene.store.BufferedChecksumIndexInput.readBytes (BufferedChecksumIndexInput.java:46)
    [6] org.apache.lucene.store.DataInput.readString (DataInput.java:265)
    [7] org.apache.lucene.codecs.CodecUtil.checkHeaderNoMagic (CodecUtil.java:202)
    [8] org.apache.lucene.codecs.CodecUtil.checkHeader (CodecUtil.java:193)
    [9] org.apache.lucene.codecs.CodecUtil.checkIndexHeader (CodecUtil.java:253)
    [10] org.apache.lucene.codecs.lucene90.Lucene90FieldInfosFormat.read (Lucene90FieldInfosFormat.java:128)
    [11] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:111)
    [12] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
    [13] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
    [14] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [15] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
    [16] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [17] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [18] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [19] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)


File format overview

The .fnm file

Format reference: see the excerpt from the Lucene file-format documentation quoted below.


The .fnm file consists of the following parts:

• Header
• FieldsCount: the number of fields
• an array of length FieldsCount, where each element contains these fields: [FieldName: the field name, FieldNumber: the field's number, FieldBits, DocValuesBits, DocValuesGen, DimensionCount, DimensionNumBytes]
• Footer

The .fnm file describes a field's basic information; it can be regarded as the field's metadata.



    Field names are stored in the field info file, with suffix .fnm.

    FieldInfos (.fnm) --> Header,FieldsCount, <FieldName,FieldNumber, FieldBits,DocValuesBits,DocValuesGen,Attributes,DimensionCount,DimensionNumBytes> ,Footer

    Data types:

    Header --> IndexHeader
    FieldsCount --> VInt
    FieldName --> String
    FieldBits, IndexOptions, DocValuesBits --> Byte
    FieldNumber, DimensionCount, DimensionNumBytes --> VInt
    Attributes --> Map<String,String>
    DocValuesGen --> Int64
    Footer --> CodecFooter
    Field Descriptions:
    FieldsCount: the number of fields in this file.
    FieldName: name of the field as a UTF-8 String.
    FieldNumber: the field's number. Note that unlike previous versions of Lucene, the fields are not numbered implicitly by their order in the file, instead explicitly.
    FieldBits: a byte containing field options.
    The low order bit (0x1) is one for fields that have term vectors stored, and zero for fields without term vectors.
    If the second lowest order-bit is set (0x2), norms are omitted for the indexed field.
    If the third lowest-order bit is set (0x4), payloads are stored for the indexed field.
    IndexOptions: a byte containing index options.
    0: not indexed
    1: indexed as DOCS_ONLY
    2: indexed as DOCS_AND_FREQS
    3: indexed as DOCS_AND_FREQS_AND_POSITIONS
    4: indexed as DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS
    DocValuesBits: a byte containing per-document value types. The type recorded as two four-bit integers, with the high-order bits representing norms options, and the low-order bits representing DocValues options. Each four-bit integer can be decoded as such:
    0: no DocValues for this field.
    1: NumericDocValues. (DocValuesType.NUMERIC)
    2: BinaryDocValues. (DocValuesType#BINARY)
    3: SortedDocValues. (DocValuesType#SORTED)
    DocValuesGen is the generation count of the field's DocValues. If this is -1, there are no DocValues updates to that field. Anything above zero means there are updates stored by DocValuesFormat.
    Attributes: a key-value map of codec-private attributes.
    PointDimensionCount, PointNumBytes: these are non-zero only if the field is indexed as points, e.g. using LongPoint
    VectorDimension: it is non-zero if the field is indexed as vectors.
    VectorSimilarityFunction: a byte containing distance function used for similarity calculation.
    0: EUCLIDEAN distance. (VectorSimilarityFunction.EUCLIDEAN)
    1: DOT_PRODUCT similarity. (VectorSimilarityFunction.DOT_PRODUCT)
    2: COSINE similarity. (VectorSimilarityFunction.COSINE)
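As a quick illustration of the FieldBits byte quoted above, the three low-order flags can be decoded like this (a sketch based only on the documentation text, not Lucene's FieldInfosFormat code):

// Hypothetical helper that decodes the FieldBits flags described above.
class FieldBitsSketch {
    static final int STORE_TERMVECTOR = 0x1; // bit 0: term vectors stored
    static final int OMIT_NORMS       = 0x2; // bit 1: norms omitted
    static final int STORE_PAYLOADS   = 0x4; // bit 2: payloads stored

    static String describe(byte fieldBits) {
        return "termVectors=" + ((fieldBits & STORE_TERMVECTOR) != 0)
             + " omitNorms="  + ((fieldBits & OMIT_NORMS) != 0)
             + " payloads="   + ((fieldBits & STORE_PAYLOADS) != 0);
    }
}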

.fdt

File path: lucene\backward-codecs\src\java\org\apache\lucene\backward_codecs\lucene50\Lucene50CompoundFormat.java

I could not find the .fdt format documented for the 9.0 codec, only the 2.9.4 one, so that older description of the .fdt format is used here as an approximation.

    main[1] print fieldsStreamFN
    fieldsStreamFN = "_j.fdt"
    main[1] list
    124 numDocs = si.maxDoc();
    125
    126 final String fieldsStreamFN =
    127 IndexFileNames.segmentFileName(segment, segmentSuffix, FIELDS_EXTENSION);
    128 => ChecksumIndexInput metaIn = null;
    129 try {
    130 // Open the data file
    131 fieldsStream = d.openInput(fieldsStreamFN, context);
    132 version =
    133 CodecUtil.checkIndexHeader(
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.<init> (Lucene90CompressingStoredFieldsReader.java:128)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat.fieldsReader (Lucene90CompressingStoredFieldsFormat.java:133)
    [3] org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat.fieldsReader (Lucene90StoredFieldsFormat.java:136)
    [4] org.apache.lucene.index.SegmentCoreReaders.<init> (SegmentCoreReaders.java:138)
    [5] org.apache.lucene.index.SegmentReader.<init> (SegmentReader.java:91)
    [6] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:94)
    [7] org.apache.lucene.index.StandardDirectoryReader$1.doBody (StandardDirectoryReader.java:77)
    [8] org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run (SegmentInfos.java:809)
    [9] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:109)
    [10] org.apache.lucene.index.StandardDirectoryReader.open (StandardDirectoryReader.java:67)
    [11] org.apache.lucene.index.DirectoryReader.open (DirectoryReader.java:60)
    [12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:105)

Loading the doc content into a Document object

The overall flow: given a docid, fetch the document's content.


    @Override
    public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {

final SerializedDocument doc = document(docID); // fetch the serialized document for this docID

    for (int fieldIDX = 0; fieldIDX < doc.numStoredFields; fieldIDX++) {
    final long infoAndBits = doc.in.readVLong();
    final int fieldNumber = (int) (infoAndBits >>> TYPE_BITS);
    final FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);

    final int bits = (int) (infoAndBits & TYPE_MASK);
    assert bits <= NUMERIC_DOUBLE : "bits=" + Integer.toHexString(bits);

    switch (visitor.needsField(fieldInfo)) {
    case YES:
readField(doc.in, visitor, fieldInfo, bits); // read through the input, i.e. the fd backed by the mmap64 mapping; this is where the .fdt file is read
    break;
    ...
    }
    }
    }
    main[1] where
    [1] org.apache.lucene.document.Document.add (Document.java:60)
    [2] org.apache.lucene.document.DocumentStoredFieldVisitor.stringField (DocumentStoredFieldVisitor.java:74)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.readField (Lucene90CompressingStoredFieldsReader.java:246)
    [4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:640)
    [5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Building a SerializedDocument from the docid

The entry point is here:

    org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document

The document method of Lucene90CompressingStoredFieldsReader:

    main[1] where
    [1] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.seek (ByteBufferIndexInput.java:529)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:606)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
    [4] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [5] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [6] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [7] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    SerializedDocument document(int docID) throws IOException {
    if (state.contains(docID) == false) {
fieldsStream.seek(indexReader.getStartPointer(docID)); // seek by offset within the mmap64-mapped file
    state.reset(docID);
    }
    assert state.contains(docID);
return state.document(docID); // see the concrete implementation next; state is an instance of a static inner class
    }

Now let's look at the static inner class's implementation:

      /**
    * Get the serialized representation of the given docID. This docID has to be contained in the
    * current block.
    */
    SerializedDocument document(int docID) throws IOException {
    if (contains(docID) == false) {
    throw new IllegalArgumentException();
    }

    final int index = docID - docBase;
    final int offset = Math.toIntExact(offsets[index]);
    final int length = Math.toIntExact(offsets[index + 1]) - offset;
    final int totalLength = Math.toIntExact(offsets[chunkDocs]);
    final int numStoredFields = Math.toIntExact(this.numStoredFields[index]);

    final BytesRef bytes;
    if (merging) {
    bytes = this.bytes;
    } else {
    bytes = new BytesRef();
    }

    final DataInput documentInput;
    if (length == 0) {
    ...
    } else {
fieldsStream.seek(startPointer); // seek to the chunk's start offset in the mmap64-mapped file
decompressor.decompress(fieldsStream, totalLength, offset, length, bytes); // decompress the chunk's data
assert bytes.length == length;
documentInput = new ByteArrayDataInput(bytes.bytes, bytes.offset, bytes.length); // wrap the decompressed bytes in a DataInput
}

return new SerializedDocument(documentInput, length, numStoredFields); // build the SerializedDocument
    }
    }

Next, the content-loading process in more detail:

     pos = 4
    main[1] dump bytes
    bytes = {
    120, 116, 0, 26, 47, 104, 111, 109, 101, 47, 117, 98, 117, 110, 116, 117, 47, 100, 111, 99, 47, 104, 101, 108, 108, 111, 46, 116, 120, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    }
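Decoding that dump by hand shows what the block contains: pos = 4 is where the string bytes start, the byte 26 at index 3 is their length, and the next 26 bytes are the UTF-8 value of the stored "path" field (a standalone sketch, not Lucene's decoder):

import java.nio.charset.StandardCharsets;

public class DecodeStoredField {
    public static void main(String[] args) {
        byte[] bytes = {
            120, 116, 0, 26, 47, 104, 111, 109, 101, 47, 117, 98, 117, 110, 116, 117,
            47, 100, 111, 99, 47, 104, 101, 108, 108, 111, 46, 116, 120, 116
        };
        int len = bytes[3];                                                // 26
        String value = new String(bytes, 4, len, StandardCharsets.UTF_8);
        System.out.println(value);                                         // /home/ubuntu/doc/hello.txt
    }
}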
    main[1] print in
    in = "MMapIndexInput(path="/home/ubuntu/index/_j.fdt")"
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.LZ4WithPresetDictCompressionMode$LZ4WithPresetDictDecompressor.decompress (LZ4WithPresetDictCompressionMode.java:88)
    [2] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader$BlockState.document (Lucene90CompressingStoredFieldsReader.java:595)
    [3] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.document (Lucene90CompressingStoredFieldsReader.java:610)
    [4] org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsReader.visitDocument (Lucene90CompressingStoredFieldsReader.java:628)
    [5] org.apache.lucene.index.CodecReader.document (CodecReader.java:89)
    [6] org.apache.lucene.index.BaseCompositeReader.document (BaseCompositeReader.java:154)
    [7] org.apache.lucene.index.IndexReader.document (IndexReader.java:380)
    [8] org.apache.lucene.search.IndexSearcher.doc (IndexSearcher.java:380)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:214)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Loading and processing the term file

    public SegmentTermsEnum(FieldReader fr) throws IOException {
    this.fr = fr;

    // if (DEBUG) {
    // System.out.println("BTTR.init seg=" + fr.parent.segment);
    // }
    stack = new SegmentTermsEnumFrame[0];

    // Used to hold seek by TermState, or cached seek
    staticFrame = new SegmentTermsEnumFrame(this, -1);

    if (fr.index == null) {
    fstReader = null;
    } else {
    fstReader = fr.index.getBytesReader();
    }

    // Init w/ root block; don't use index since it may
    // not (and need not) have been loaded
    for (int arcIdx = 0; arcIdx < arcs.length; arcIdx++) {
    arcs[arcIdx] = new FST.Arc<>();
    }

    currentFrame = staticFrame;
    final FST.Arc<BytesRef> arc;
    if (fr.index != null) {
    arc = fr.index.getFirstArc(arcs[0]);
    // Empty string prefix must have an output in the index!
    assert arc.isFinal();
    } else {
    arc = null;
    }
    // currentFrame = pushFrame(arc, rootCode, 0);
    // currentFrame.loadBlock();
    validIndexPrefix = 0;
    // if (DEBUG) {
    // System.out.println("init frame state " + currentFrame.ord);
    // printSeekState();
    // }

    // System.out.println();
    // computeBlockStats().print(System.out);
    }

Fetching an arc with getArc

    private FST.Arc<BytesRef> getArc(int ord) {
    if (ord >= arcs.length) {
    @SuppressWarnings({"rawtypes", "unchecked"})
    final FST.Arc<BytesRef>[] next =
    new FST.Arc[ArrayUtil.oversize(1 + ord, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
    System.arraycopy(arcs, 0, next, 0, arcs.length);
    for (int arcOrd = arcs.length; arcOrd < next.length; arcOrd++) {
    next[arcOrd] = new FST.Arc<>();
    }
    arcs = next;
    }
    return arcs[ord];
    }
    Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.getArc(), line=222 bci=0
    222 if (ord >= arcs.length) {

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.getArc (SegmentTermsEnum.java:222)
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:511)
    [3] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
    [4] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


Collecting all the matching documents (scoreAll):

    main[1] where
    [1] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:300)
    [2] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [3] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1] list
    296 DocIdSetIterator iterator,
    297 TwoPhaseIterator twoPhase,
    298 Bits acceptDocs)
    299 throws IOException {
    300 => if (twoPhase == null) {
    301 for (int doc = iterator.nextDoc();
    302 doc != DocIdSetIterator.NO_MORE_DOCS;
    303 doc = iterator.nextDoc()) {
    304 if (acceptDocs == null || acceptDocs.get(doc)) {
    305 collector.collect(doc);
    main[1] print iterator
    iterator = "org.apache.lucene.search.ImpactsDISI@6279cee3"

    main[1] list
    494 @Override
    495 public int advance(int target) throws IOException {
    496 // current skip docID < docIDs generated from current buffer <= next skip docID
    497 // we don't need to skip if target is buffered already
    498 => if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
    499
    500 if (skipper == null) {
    501 // Lazy init: first time this enum has ever been used for skipping
    502 skipper =
    503 new Lucene90SkipReader(
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.advance (Lucene90PostingsReader.java:498)
    [2] org.apache.lucene.index.SlowImpactsEnum.advance (SlowImpactsEnum.java:77)
    [3] org.apache.lucene.search.ImpactsDISI.advance (ImpactsDISI.java:135)
    [4] org.apache.lucene.search.ImpactsDISI.nextDoc (ImpactsDISI.java:140)
    [5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:301)
    [6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [7] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [11] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [13] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [14] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The class that produces the iterator is SegmentTermsEnum:

    main[1] where
    [1] org.apache.lucene.search.TermQuery$TermWeight.getTermsEnum (TermQuery.java:145)
    [2] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
    [3] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [7] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [9] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [10] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1] print termsEnum
    termsEnum = "org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum@1a84f40f"

The getTermsEnum method yields the term's statistics and file offsets; SegmentTermsEnum itself does not contain the doc iterator.

    main[1] where
    [1] org.apache.lucene.index.Term.bytes (Term.java:128)
    [2] org.apache.lucene.search.TermQuery$TermWeight.getTermsEnum (TermQuery.java:145)
    [3] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
    [4] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


    144 final TermsEnum termsEnum = context.reader().terms(term.field()).iterator();
    145 => termsEnum.seekExact(term.bytes(), state);
    146 return termsEnum;
    147 }

Here term.bytes() is our search value, so the term's posting information starts to be read from this point (I have not fully verified this yet, so let's tentatively assume so).


After the posting information is read, scoring and sorting begin. The scorer has an iterator that can walk every doc_id.

    main[1] list
    348 // (needsFreq=false)
    349 private boolean isFreqsRead;
    350 private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1
    351
    352 => public BlockDocsEnum(FieldInfo fieldInfo) throws IOException {
    353 this.startDocIn = Lucene90PostingsReader.this.docIn;
    354 this.docIn = null;
    355 indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
    356 indexHasPos =
    357 fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader$BlockDocsEnum.<init> (Lucene90PostingsReader.java:352)
    [2] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.postings (Lucene90PostingsReader.java:258)
    [3] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.impacts (Lucene90PostingsReader.java:280)
    [4] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.impacts (SegmentTermsEnum.java:1,150)
    [5] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:114)
    [6] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The top-k collector's stack

    Breakpoint hit: "thread=main", org.apache.lucene.search.TopDocsCollector.populateResults(), line=64 bci=0
    64 for (int i = howMany - 1; i >= 0; i--) {

    main[1] where
    [1] org.apache.lucene.search.TopDocsCollector.populateResults (TopDocsCollector.java:64)
    [2] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:166)
    [3] org.apache.lucene.search.TopDocsCollector.topDocs (TopDocsCollector.java:98)
    [4] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:526)
    [5] org.apache.lucene.search.IndexSearcher$2.reduce (IndexSearcher.java:505)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1]

The search process

    main[1] dump collector
    collector = {
    org.apache.lucene.search.TopScoreDocCollector.docBase: 0
    org.apache.lucene.search.TopScoreDocCollector.pqTop: instance of org.apache.lucene.search.ScoreDoc(id=1529)
    org.apache.lucene.search.TopScoreDocCollector.hitsThresholdChecker: instance of org.apache.lucene.search.HitsThresholdChecker$LocalHitsThresholdChecker(id=1530)
    org.apache.lucene.search.TopScoreDocCollector.minScoreAcc: null
    org.apache.lucene.search.TopScoreDocCollector.minCompetitiveScore: 0.0
    org.apache.lucene.search.TopScoreDocCollector.$assertionsDisabled: true
    org.apache.lucene.search.TopDocsCollector.EMPTY_TOPDOCS: instance of org.apache.lucene.search.TopDocs(id=1531)
    org.apache.lucene.search.TopDocsCollector.pq: instance of org.apache.lucene.search.HitQueue(id=1532)
    org.apache.lucene.search.TopDocsCollector.totalHits: 0
    org.apache.lucene.search.TopDocsCollector.totalHitsRelation: instance of org.apache.lucene.search.TotalHits$Relation(id=1533)
    }
    main[1] print collector
    collector = "org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector@62bd765"

How the hit count is obtained

    690      private <C extends Collector, T> T search(
    691 Weight weight, CollectorManager<C, T> collectorManager, C firstCollector) throws IOException {
    692 if (executor == null || leafSlices.length <= 1) {
    693 search(leafContexts, weight, firstCollector);
    694 => return collectorManager.reduce(Collections.singletonList(firstCollector));
    695 } else {
    696 final List<C> collectors = new ArrayList<>(leafSlices.length);
    697 collectors.add(firstCollector);
    698 final ScoreMode scoreMode = firstCollector.scoreMode();
    699 for (int i = 1; i < leafSlices.length; ++i) {
    main[1] where
    [1] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:694)
    [2] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [3] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [5] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [6] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Tracing upward from org.apache.lucene.search.TopScoreDocCollector.create, the collector already exists by the time org.apache.lucene.search.IndexSearcher.searchAfter runs. So where does this hit count get initialized?

Clearly, search fills in firstCollector's data; where exactly is it assigned?

    protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector)
    throws IOException {

    // TODO: should we make this
    // threaded...? the Collector could be sync'd?
    // always use single thread:
    for (LeafReaderContext ctx : leaves) { // search each subreader
    final LeafCollector leafCollector;
    try {
    leafCollector = collector.getLeafCollector(ctx);
    } catch (
    @SuppressWarnings("unused")
    CollectionTerminatedException e) {
    // there is no doc of interest in this reader context
    // continue with the following leaf
    continue;
    }
BulkScorer scorer = weight.bulkScorer(ctx); /// the total hit count is accumulated from here
    if (scorer != null) {
    try {
    scorer.score(leafCollector, ctx.reader().getLiveDocs());
    } catch (
    @SuppressWarnings("unused")
    CollectionTerminatedException e) {
    // collection was terminated prematurely
    // continue with the following leaf
    }
    }
    }
    }

Having read the final stack trace, we have confirmed that totalHits is assigned here: it is incremented by one on every call, so it is clearly a counter, and what it counts is the matching search hits. So where do those matches come from?

We can only keep tracing upward.

    main[1] where
    [1] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
    [2] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
    [3] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [4] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [6] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

    @Override
    public void collect(int doc) throws IOException {
float score = scorer.score(); // compute the score; this score() callback is pluggable

    // This collector relies on the fact that scorers produce positive values:
    assert score >= 0; // NOTE: false for NaN

totalHits++; // the hit count is incremented here
    hitsThresholdChecker.incrementHitCount();

    if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) {
    updateGlobalMinCompetitiveScore(scorer);
    }

    if (score <= pqTop.score) {
    if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) {
    // we just reached totalHitsThreshold, we can start setting the min
    // competitive score now
    updateMinCompetitiveScore(scorer);
    }
    // Since docs are returned in-order (i.e., increasing doc Id), a document
    // with equal score to pqTop.score cannot compete since HitQueue favors
    // documents with lower doc Ids. Therefore reject those docs too.
    return;
    }
    pqTop.doc = doc + docBase;
    pqTop.score = score;
    pqTop = pq.updateTop();
    updateMinCompetitiveScore(scorer);
    }
    };

Pushing further up, we found this stack: the scorer is created from the context.

    /**
    * Optional method, to return a {@link BulkScorer} to score the query and send hits to a {@link
    * Collector}. Only queries that have a different top-level approach need to override this; the
    * default implementation pulls a normal {@link Scorer} and iterates and collects the resulting
    * hits which are not marked as deleted.
    *
    * @param context the {@link org.apache.lucene.index.LeafReaderContext} for which to return the
    * {@link Scorer}.
    * @return a {@link BulkScorer} which scores documents and passes them to a collector.
    * @throws IOException if there is a low-level I/O error
    */
    public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {

    Scorer scorer = scorer(context);
    if (scorer == null) {
    // No docs match
    return null;
    }

    // This impl always scores docs in order, so we can
    // ignore scoreDocsInOrder:
    return new DefaultBulkScorer(scorer);
    }

Looking further up: bulkScorer calls back into a scorer method, and the implementation of that abstract method is org.apache.lucene.search.TermQuery$TermWeight.scorer.

This scorer method uses the context argument together with the enclosing TermQuery's term to compute the hit count.
    main[1] list
    103 assert termStates == null || termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context))
    104 : "The top-reader used to create Weight is not the same as the current reader's top-reader ("
    105 + ReaderUtil.getTopLevelContext(context);
    106 ;
    107 => final TermsEnum termsEnum = getTermsEnum(context);
    108 if (termsEnum == null) {
    109 return null;
    110 }
    111 LeafSimScorer scorer =
112 new LeafSimScorer(simScorer, context.reader(), term.field(), scoreMode.needsScores()); // term here is the outer class's term, i.e. this$0.term
    main[1] where
    [1] org.apache.lucene.search.TermQuery$TermWeight.scorer (TermQuery.java:107)
    [2] org.apache.lucene.search.Weight.bulkScorer (Weight.java:166)
    [3] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:767)
    [4] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

So advance is called afterwards; the advance method that finally runs is the one below. It uses docTermStartFP, so where is that initialized?

It actually comes from termStates; the initialization is docTermStartFP = termState.docStartFP;


    lucene\core\src\java\org\apache\lucene\codecs\lucene90\Lucene90PostingsReader.java
    @Override
    public int advance(int target) throws IOException {
    // current skip docID < docIDs generated from current buffer <= next skip docID
    // we don't need to skip if target is buffered already
    if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {

    if (skipper == null) {
    // Lazy init: first time this enum has ever been used for skipping
    skipper =
    new Lucene90SkipReader(
    docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads);
    }

    if (!skipped) {
    assert skipOffset != -1;
    // This is the first time this enum has skipped
    // since reset() was called; load the skip data:
    skipper.init(docTermStartFP + skipOffset, docTermStartFP, 0, 0, docFreq);
    skipped = true;
    }

    // always plus one to fix the result, since skip position in Lucene90SkipReader
    // is a little different from MultiLevelSkipListReader
    final int newDocUpto = skipper.skipTo(target) + 1;

    if (newDocUpto >= blockUpto) {
    // Skipper moved
    assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto;
    blockUpto = newDocUpto;

    // Force to read next block
    docBufferUpto = BLOCK_SIZE;
    accum = skipper.getDoc(); // actually, this is just lastSkipEntry
    docIn.seek(skipper.getDocPointer()); // now point to the block we want to search
    // even if freqBuffer were not read from the previous block, we will mark them as read,
    // as we don't need to skip the previous block freqBuffer in refillDocs,
    // as we have already positioned docIn where in needs to be.
    isFreqsRead = true;
    }
    // next time we call advance, this is used to
    // foresee whether skipper is necessary.
    nextSkipDoc = skipper.getNextSkipDoc();
    }
    if (docBufferUpto == BLOCK_SIZE) {
    refillDocs();
    }

    // Now scan... this is an inlined/pared down version
    // of nextDoc():
    long doc;
    while (true) {
    doc = docBuffer[docBufferUpto];

    if (doc >= target) {
    break;
    }
    ++docBufferUpto;
    }

    docBufferUpto++;
    return this.doc = (int) doc;
    }

    @Override
    public long cost() {
    return docFreq;
    }
    }

Next, let's look at how termStates gets initialized. My first guess is that term is a member field of termStates.

Setting a breakpoint, we eventually end up here:

    main[1] list
    178 }
    179
    180 @Override
    181 public BlockTermState newTermState() {
    182 => return new IntBlockTermState();
    183 }
    184
    185 @Override
    186 public void close() throws IOException {
    187 IOUtils.close(docIn, posIn, payIn);
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.newTermState (Lucene90PostingsReader.java:182)
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.<init> (SegmentTermsEnumFrame.java:101)
    [3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.<init> (SegmentTermsEnum.java:76)
    [4] org.apache.lucene.codecs.lucene90.blocktree.FieldReader.iterator (FieldReader.java:153)
    [5] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:116)
    [6] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [7] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [8] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [10] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [12] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [13] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1]

Finally, this should be the core flow that actually looks up the term (I hope so).

    main[1] list
    113
    114 private static TermsEnum loadTermsEnum(LeafReaderContext ctx, Term term) throws IOException {
    115 final Terms terms = Terms.getTerms(ctx.reader(), term.field());
    116 final TermsEnum termsEnum = terms.iterator();
    117 => if (termsEnum.seekExact(term.bytes())) {
    118 return termsEnum;
    119 }
    120 return null;
    121 }
    122
    main[1] print term.bytes()
    term.bytes() = "[61 6d]"
    main[1] where
    [1] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
    [2] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

At the very end it should call into here to get the term's counts. Exactly where still needs to be confirmed, but the code path should be this one.
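The loadTermsEnum step traced here is essentially what you can do yourself through the public terms API; a rough sketch (field "contents" and term "am" are assumptions):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

final class SeekSketch {
    // Seeks "am" in field "contents" (assumed names) and prints its statistics.
    static void seek(IndexReader reader) throws IOException {
        Terms terms = MultiTerms.getTerms(reader, "contents");
        if (terms == null) return;                 // field not indexed
        TermsEnum te = terms.iterator();
        if (te.seekExact(new BytesRef("am"))) {    // the same seekExact that TermStates.loadTermsEnum calls
            System.out.println("docFreq=" + te.docFreq() + " totalTermFreq=" + te.totalTermFreq());
        }
    }
}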

    // Target's prefix matches this block's prefix; we
    // scan the entries check if the suffix matches.
    public SeekStatus scanToTermLeaf(BytesRef target, boolean exactOnly) throws IOException {

    // if (DEBUG) System.out.println(" scanToTermLeaf: block fp=" + fp + " prefix=" + prefix + "
    // nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" +
    // brToString(term));

    assert nextEnt != -1;

    ste.termExists = true;
    subCode = 0;

    if (nextEnt == entCount) {
    if (exactOnly) {
    fillTerm();
    }
    return SeekStatus.END;
    }

    assert prefixMatches(target);

    // TODO: binary search when all terms have the same length, which is common for ID fields,
    // which are also the most sensitive to lookup performance?
    // Loop over each entry (term or sub-block) in this block:
    do {
    nextEnt++;

    suffix = suffixLengthsReader.readVInt();

    // if (DEBUG) {
    // BytesRef suffixBytesRef = new BytesRef();
    // suffixBytesRef.bytes = suffixBytes;
    // suffixBytesRef.offset = suffixesReader.getPosition();
    // suffixBytesRef.length = suffix;
    // System.out.println(" cycle: term " + (nextEnt-1) + " (of " + entCount + ") suffix="
    // + brToString(suffixBytesRef));
    // }

    startBytePos = suffixesReader.getPosition();
    suffixesReader.skipBytes(suffix);

    // Loop over bytes in the suffix, comparing to the target
    final int cmp =
    Arrays.compareUnsigned(
    suffixBytes,
    startBytePos,
    startBytePos + suffix,
    target.bytes,
    target.offset + prefix,
    target.offset + target.length);

    if (cmp < 0) {
    // Current entry is still before the target;
    // keep scanning
    } else if (cmp > 0) {
    // Done! Current entry is after target --
    // return NOT_FOUND:
    fillTerm();

    // if (DEBUG) System.out.println(" not found");
    return SeekStatus.NOT_FOUND;
    } else {
    // Exact match!

    // This cannot be a sub-block because we
    // would have followed the index to this
    // sub-block from the start:

    assert ste.termExists;
    fillTerm();
    // if (DEBUG) System.out.println(" found!");
    return SeekStatus.FOUND;
    }
    } while (nextEnt < entCount);

    // It is possible (and OK) that terms index pointed us
    // at this block, but, we scanned the entire block and
    // did not find the term to position to. This happens
    // when the target is after the last term in the block
    // (but, before the next term in the index). EG
    // target could be foozzz, and terms index pointed us
    // to the foo* block, but the last term in this block
    // was fooz (and, eg, first term in the next block will
    // bee fop).
    // if (DEBUG) System.out.println(" block end");
    if (exactOnly) {
    fillTerm();
    }

    // TODO: not consistent that in the
    // not-exact case we don't next() into the next
    // frame here
    return SeekStatus.END;
    }

    // Target's prefix matches this block's prefix; we
    // scan the entries check if the suffix matches.
    public SeekStatus scanToTermNonLeaf(BytesRef target, boolean exactOnly) throws IOException {

    // if (DEBUG) System.out.println(" scanToTermNonLeaf: block fp=" + fp + " prefix=" + prefix +
    // " nextEnt=" + nextEnt + " (of " + entCount + ") target=" + brToString(target) + " term=" +
    // brToString(target));

    assert nextEnt != -1;

    if (nextEnt == entCount) {
    if (exactOnly) {
    fillTerm();
    ste.termExists = subCode == 0;
    }
    return SeekStatus.END;
    }

    assert prefixMatches(target);

    // Loop over each entry (term or sub-block) in this block:
    while (nextEnt < entCount) {

    nextEnt++;

    final int code = suffixLengthsReader.readVInt();
    suffix = code >>> 1;

    // if (DEBUG) {
    // BytesRef suffixBytesRef = new BytesRef();
    // suffixBytesRef.bytes = suffixBytes;
    // suffixBytesRef.offset = suffixesReader.getPosition();
    // suffixBytesRef.length = suffix;
    // System.out.println(" cycle: " + ((code&1)==1 ? "sub-block" : "term") + " " +
    // (nextEnt-1) + " (of " + entCount + ") suffix=" + brToString(suffixBytesRef));
    // }

    final int termLen = prefix + suffix;
    startBytePos = suffixesReader.getPosition();
    suffixesReader.skipBytes(suffix);
    ste.termExists = (code & 1) == 0;
    if (ste.termExists) {
    state.termBlockOrd++;
    subCode = 0;
    } else {
    subCode = suffixLengthsReader.readVLong();
    lastSubFP = fp - subCode;
    }

    final int cmp =
    Arrays.compareUnsigned(
    suffixBytes,
    startBytePos,
    startBytePos + suffix,
    target.bytes,
    target.offset + prefix,
    target.offset + target.length);

    if (cmp < 0) {
    // Current entry is still before the target;
    // keep scanning
    } else if (cmp > 0) {
    // Done! Current entry is after target --
    // return NOT_FOUND:
    fillTerm();

    // if (DEBUG) System.out.println(" maybe done exactOnly=" + exactOnly + "
    // ste.termExists=" + ste.termExists);

    if (!exactOnly && !ste.termExists) {
    // System.out.println(" now pushFrame");
    // TODO this
    // We are on a sub-block, and caller wants
    // us to position to the next term after
    // the target, so we must recurse into the
    // sub-frame(s):
    ste.currentFrame = ste.pushFrame(null, ste.currentFrame.lastSubFP, termLen);
    ste.currentFrame.loadBlock();
    while (ste.currentFrame.next()) {
    ste.currentFrame = ste.pushFrame(null, ste.currentFrame.lastSubFP, ste.term.length());
    ste.currentFrame.loadBlock(); /////////////////////////////////////////////////// this is where the block is loaded from the stream
    }
    }

    // if (DEBUG) System.out.println(" not found");
    return SeekStatus.NOT_FOUND;
    } else {
    // Exact match!

    // This cannot be a sub-block because we
    // would have followed the index to this
    // sub-block from the start:

    assert ste.termExists;
    fillTerm();
    // if (DEBUG) System.out.println(" found!");
    return SeekStatus.FOUND;
    }
    }

    // It is possible (and OK) that terms index pointed us
    // at this block, but, we scanned the entire block and
    // did not find the term to position to. This happens
    // when the target is after the last term in the block
    // (but, before the next term in the index). EG
    // target could be foozzz, and terms index pointed us
    // to the foo* block, but the last term in this block
    // was fooz (and, eg, first term in the next block will
    // bee fop).
    // if (DEBUG) System.out.println(" block end");
    if (exactOnly) {
    fillTerm();
    }

    // TODO: not consistent that in the
    // not-exact case we don't next() into the next
    // frame here
    return SeekStatus.END;
    }

How does termState get deserialized?

    Breakpoint hit: "thread=main", org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.decodeTerm(), line=194 bci=0
    194 final IntBlockTermState termState = (IntBlockTermState) _termState;

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.Lucene90PostingsReader.decodeTerm (Lucene90PostingsReader.java:194)
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData (SegmentTermsEnumFrame.java:476)
    [3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.termState (SegmentTermsEnum.java:1,178)
    [4] org.apache.lucene.index.TermStates.build (TermStates.java:104)
    [5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)


    @Override
    public void decodeTerm(
    DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute)
    throws IOException {
    final IntBlockTermState termState = (IntBlockTermState) _termState;
    final boolean fieldHasPositions =
    fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
    final boolean fieldHasOffsets =
    fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS)
    >= 0;
    final boolean fieldHasPayloads = fieldInfo.hasPayloads();

    if (absolute) {
    termState.docStartFP = 0;
    termState.posStartFP = 0;
    termState.payStartFP = 0;
    }

    final long l = in.readVLong();
    if ((l & 0x01) == 0) {
    termState.docStartFP += l >>> 1;
    if (termState.docFreq == 1) {
    termState.singletonDocID = in.readVInt();
    } else {
    termState.singletonDocID = -1;
    }
    } else {
    assert absolute == false;
    assert termState.singletonDocID != -1;
    termState.singletonDocID += BitUtil.zigZagDecode(l >>> 1);
    }

    if (fieldHasPositions) {
    termState.posStartFP += in.readVLong();
    if (fieldHasOffsets || fieldHasPayloads) {
    termState.payStartFP += in.readVLong();
    }
    if (termState.totalTermFreq > BLOCK_SIZE) {
    termState.lastPosBlockOffset = in.readVLong();
    } else {
    termState.lastPosBlockOffset = -1;
    }
    }

    if (termState.docFreq > BLOCK_SIZE) {
    termState.skipOffset = in.readVLong();
    } else {
    termState.skipOffset = -1;
    }
    }

In fact, ste holds a reference to the term:

    main[2] dump ste.term.ref.bytes
    ste.term.ref.bytes = {
    97, 109, 0, 0, 0, 0, 0, 0
    }
    main[2] where
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData (SegmentTermsEnumFrame.java:476)
    [3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.termState (SegmentTermsEnum.java:1,178)
    [4] org.apache.lucene.index.TermStates.build (TermStates.java:104)
    [5] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [6] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [8] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [9] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [10] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [11] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

ste.in describes the file being read:

     ste.in = {
    $assertionsDisabled: true
    org.apache.lucene.store.ByteBufferIndexInput.EMPTY_FLOATBUFFER: instance of java.nio.HeapFloatBuffer(id=1473)
    org.apache.lucene.store.ByteBufferIndexInput.EMPTY_LONGBUFFER: instance of java.nio.HeapLongBuffer(id=1474)
    org.apache.lucene.store.ByteBufferIndexInput.EMPTY_INTBUFFER: instance of java.nio.HeapIntBuffer(id=1475)
    org.apache.lucene.store.ByteBufferIndexInput.length: 1993
    org.apache.lucene.store.ByteBufferIndexInput.chunkSizeMask: 1073741823
    org.apache.lucene.store.ByteBufferIndexInput.chunkSizePower: 30
    org.apache.lucene.store.ByteBufferIndexInput.guard: instance of org.apache.lucene.store.ByteBufferGuard(id=1476)
    org.apache.lucene.store.ByteBufferIndexInput.buffers: instance of java.nio.ByteBuffer[1] (id=1477)
    org.apache.lucene.store.ByteBufferIndexInput.curBufIndex: 0
    org.apache.lucene.store.ByteBufferIndexInput.curBuf: instance of java.nio.DirectByteBufferR(id=1479)
    org.apache.lucene.store.ByteBufferIndexInput.curLongBufferViews: null
    org.apache.lucene.store.ByteBufferIndexInput.curIntBufferViews: null
    org.apache.lucene.store.ByteBufferIndexInput.curFloatBufferViews: null
    org.apache.lucene.store.ByteBufferIndexInput.isClone: true
    org.apache.lucene.store.ByteBufferIndexInput.$assertionsDisabled: true
    org.apache.lucene.store.IndexInput.resourceDescription: "MMapIndexInput(path="/home/dai/index/_7.cfs") [slice=_7_Lucene90_0.tim]"
    }


    public void nextLeaf() {
    // if (DEBUG) System.out.println(" frame.next ord=" + ord + " nextEnt=" + nextEnt + "
    // entCount=" + entCount);
    assert nextEnt != -1 && nextEnt < entCount
    : "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
    nextEnt++;
    suffix = suffixLengthsReader.readVInt();
    startBytePos = suffixesReader.getPosition();
    ste.term.setLength(prefix + suffix);
    ste.term.grow(ste.term.length());
    suffixesReader.readBytes(ste.term.bytes(), prefix, suffix);
    ste.termExists = true;
    }

    public boolean nextNonLeaf() throws IOException {
    // if (DEBUG) System.out.println(" stef.next ord=" + ord + " nextEnt=" + nextEnt + " entCount="
    // + entCount + " fp=" + suffixesReader.getPosition());
    while (true) {
    if (nextEnt == entCount) {
    assert arc == null || (isFloor && isLastInFloor == false)
    : "isFloor=" + isFloor + " isLastInFloor=" + isLastInFloor;
    loadNextFloorBlock();
    if (isLeafBlock) {
    nextLeaf();
    return false;
    } else {
    continue;
    }
    }

    assert nextEnt != -1 && nextEnt < entCount
    : "nextEnt=" + nextEnt + " entCount=" + entCount + " fp=" + fp;
    nextEnt++;
    final int code = suffixLengthsReader.readVInt();
    suffix = code >>> 1;
    startBytePos = suffixesReader.getPosition();
    ste.term.setLength(prefix + suffix);
    ste.term.grow(ste.term.length());
    suffixesReader.readBytes(ste.term.bytes(), prefix, suffix); // is this the most critical spot?
    if ((code & 1) == 0) {
    // A normal term
    ste.termExists = true;
    subCode = 0;
    state.termBlockOrd++;
    return false;
    } else {
    // A sub-block; make sub-FP absolute:
    ste.termExists = false;
    subCode = suffixLengthsReader.readVLong();
    lastSubFP = fp - subCode;
    // if (DEBUG) {
    // System.out.println(" lastSubFP=" + lastSubFP);
    // }
    return true;
    }
    }
    }

This looks like where the term's location information in the file is read:

    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.scanToTermLeaf (SegmentTermsEnumFrame.java:593)
    [2] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.scanToTerm (SegmentTermsEnumFrame.java:530)
    [3] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.seekExact (SegmentTermsEnum.java:538)
    [4] org.apache.lucene.index.TermStates.loadTermsEnum (TermStates.java:117)
    [5] org.apache.lucene.index.TermStates.build (TermStates.java:102)
    [6] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [7] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [8] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [9] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [10] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [11] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [12] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)
    main[1] dump suffixBytes
    suffixBytes = {
    97, 109, 97, 110, 100, 98, 117, 116, 99, 97, 110, 100, 111, 104, 101, 108, 108, 111, 104, 105, 105, 105, 115, 105, 116, 107, 110, 111, 119, 109, 97, 121, 109, 111, 110, 103, 111, 110, 111, 116, 116, 114, 121, 119, 104, 97, 116, 119, 111, 114, 108, 100, 121, 111, 117, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    }

Where the docFreq statistics for the term are read:

    main[1] list
    451 // postings
    452
    453 // TODO: if docFreq were bulk decoded we could
    454 // just skipN here:
    455 => if (statsSingletonRunLength > 0) {
    456 state.docFreq = 1;
    457 state.totalTermFreq = 1;
    458 statsSingletonRunLength--;
    459 } else {
    460 int token = statsReader.readVInt();
    main[1] print statsSingletonRunLength
    statsSingletonRunLength = 0
    main[1] next
    >
    Step completed: "thread=main", org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnumFrame.decodeMetaData(), line=460 bci=80
    460 int token = statsReader.readVInt();

    main[1] list
    456 state.docFreq = 1;
    457 state.totalTermFreq = 1;
    458 statsSingletonRunLength--;
    459 } else {
    460 => int token = statsReader.readVInt();
    461 if ((token & 1) == 1) {
    462 state.docFreq = 1;
    463 state.totalTermFreq = 1;
    464 statsSingletonRunLength = token >>> 1;
    465 } else {
    main[1] print statsReader
    statsReader = "org.apache.lucene.store.ByteArrayDataInput@6b67034"
    main[1] dump statsReader
    statsReader = {
    bytes: instance of byte[64] (id=1520)
    pos: 0
    limit: 16
    }
    main[1] dump statsReader.bytes
    statsReader.bytes = {
    4, 0, 9, 2, 1, 4, 0, 3, 2, 1, 1, 2, 1, 7, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    }

The searched term "am" corresponds to this region of the file:

    00000000  3f d7 6c 17 12 42 6c 6f  63 6b 54 72 65 65 54 65  |?.l..BlockTreeTe|
    00000010 72 6d 73 44 69 63 74 00 00 00 00 fe ea 80 e6 45 |rmsDict........E|
    00000020 20 d8 56 64 1b 1b 1b 89 70 fe 67 0a 4c 75 63 65 | .Vd....p.g.Luce|
    00000030 6e 65 39 30 5f 30 25 bc 03 61 6d 61 6e 64 62 75 |ne90_0%..amandbu|
    00000040 74 63 61 6e 64 6f 68 65 6c 6c 6f 68 69 69 69 73 |tcandohellohiiis|
    00000050 69 74 6b 6e 6f 77 6d 61 79 6d 6f 6e 67 6f 6e 6f |itknowmaymongono|
    00000060 74 74 72 79 77 68 61 74 77 6f 72 6c 64 79 6f 75 |ttrywhatworldyou|
    00000070 24 02 03 03 03 02 05 02 01 02 02 04 03 05 03 03 |$...............|
    00000080 04 05 03 10 04 00 09 02 01 04 00 03 02 01 01 02 |................| <---- the stats byte sequence starts at offset 4 of this line (04 00 09 02 ...)
    00000090 01 07 02 02 26 7a 3d 04 01 02 03 01 01 01 01 01 |....&z=.........|
    000000a0 05 01 01 01 00 02 04 00 02 01 01 01 01 01 02 01 |................|
    000000b0 01 01 02 01 01 01 01 05 01 03 01 05 a4 03 2f 68 |............../h|
    000000c0 6f 6d 65 2f 75 62 75 6e 74 75 2f 64 6f 63 2f 68 |ome/ubuntu/doc/h|
    000000d0 65 6c 6c 6f 2e 74 78 74 2f 68 6f 6d 65 2f 75 62 |ello.txt/home/ub|
    000000e0 75 6e 74 75 2f 64 6f 63 2f 6d 6f 6e 67 6f 2e 74 |untu/doc/mongo.t|
    000000f0 78 74 05 1a 01 03 04 82 01 01 03 c0 28 93 e8 00 |xt..........(...|
    00000100 00 00 00 00 00 00 00 da 02 a3 a3 |...........|

So where does docFreq get assigned?

     currentFrame.state.docFreq = 2
    main[1] list
    1,113 assert !eof;
    1,114 // if (DEBUG) System.out.println("BTR.docFreq");
    1,115 currentFrame.decodeMetaData();
    1,116 // if (DEBUG) System.out.println(" return " + currentFrame.state.docFreq);
    1,117 => return currentFrame.state.docFreq;
    1,118 }
    1,119
    1,120 @Override
    1,121 public long totalTermFreq() throws IOException {
    1,122 assert !eof;
    main[1] where
    [1] org.apache.lucene.codecs.lucene90.blocktree.SegmentTermsEnum.docFreq (SegmentTermsEnum.java:1,117)
    [2] org.apache.lucene.index.TermStates.build (TermStates.java:107)
    [3] org.apache.lucene.search.TermQuery.createWeight (TermQuery.java:227)
    [4] org.apache.lucene.search.IndexSearcher.createWeight (IndexSearcher.java:885)
    [5] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:686)
    [6] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [7] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [8] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [9] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

The read path:

    readByte:110, ByteBufferIndexInput (org.apache.lucene.store)
    readVInt:121, DataInput (org.apache.lucene.store)
    readVIntBlock:149, Lucene90PostingsReader (org.apache.lucene.codecs.lucene90)
    refillDocs:472, Lucene90PostingsReader$BlockDocsEnum (org.apache.lucene.codecs.lucene90)
    advance:538, Lucene90PostingsReader$BlockDocsEnum (org.apache.lucene.codecs.lucene90)
    advance:77, SlowImpactsEnum (org.apache.lucene.index)
    advance:128, ImpactsDISI (org.apache.lucene.search)
    nextDoc:133, ImpactsDISI (org.apache.lucene.search)
    scoreAll:301, Weight$DefaultBulkScorer (org.apache.lucene.search)
    score:247, Weight$DefaultBulkScorer (org.apache.lucene.search)
    score:38, BulkScorer (org.apache.lucene.search)
    search:776, IndexSearcher (org.apache.lucene.search)
    search:694, IndexSearcher (org.apache.lucene.search)
    search:688, IndexSearcher (org.apache.lucene.search)
    searchAfter:523, IndexSearcher (org.apache.lucene.search)
    search:538, IndexSearcher (org.apache.lucene.search)
    doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)
    queryTest:52, QueryTest (com.dinosaur.lucene.demo)

Where does the .tim file get read (initialized)?

    void loadBlock() throws IOException {

    // Clone the IndexInput lazily, so that consumers
    // that just pull a TermsEnum to
    // seekExact(TermState) don't pay this cost:
    ste.initIndexInput();

    if (nextEnt != -1) {
    // Already loaded
    return;
    }
    // System.out.println("blc=" + blockLoadCount);

    ste.in.seek(fp);
    int code = ste.in.readVInt();
    entCount = code >>> 1;
    assert entCount > 0;
    isLastInFloor = (code & 1) != 0;

    assert arc == null || (isLastInFloor || isFloor)
    : "fp=" + fp + " arc=" + arc + " isFloor=" + isFloor + " isLastInFloor=" + isLastInFloor;

    // TODO: if suffixes were stored in random-access
    // array structure, then we could do binary search
    // instead of linear scan to find target term; eg
    // we could have simple array of offsets

    final long startSuffixFP = ste.in.getFilePointer();
    // term suffixes:
    final long codeL = ste.in.readVLong();
    isLeafBlock = (codeL & 0x04) != 0;
    final int numSuffixBytes = (int) (codeL >>> 3);
    if (suffixBytes.length < numSuffixBytes) {
    suffixBytes = new byte[ArrayUtil.oversize(numSuffixBytes, 1)];
    }
    try {
    compressionAlg = CompressionAlgorithm.byCode((int) codeL & 0x03);
    } catch (IllegalArgumentException e) {
    throw new CorruptIndexException(e.getMessage(), ste.in, e);
    }
    compressionAlg.read(ste.in, suffixBytes, numSuffixBytes);
    suffixesReader.reset(suffixBytes, 0, numSuffixBytes);

    int numSuffixLengthBytes = ste.in.readVInt();
    final boolean allEqual = (numSuffixLengthBytes & 0x01) != 0;
    numSuffixLengthBytes >>>= 1;
    if (suffixLengthBytes.length < numSuffixLengthBytes) {
    suffixLengthBytes = new byte[ArrayUtil.oversize(numSuffixLengthBytes, 1)];
    }
    if (allEqual) {
    Arrays.fill(suffixLengthBytes, 0, numSuffixLengthBytes, ste.in.readByte());
    } else {
    ste.in.readBytes(suffixLengthBytes, 0, numSuffixLengthBytes);
    }
    suffixLengthsReader.reset(suffixLengthBytes, 0, numSuffixLengthBytes);
    totalSuffixBytes = ste.in.getFilePointer() - startSuffixFP;

    /*if (DEBUG) {
    if (arc == null) {
    System.out.println(" loadBlock (next) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
    } else {
    System.out.println(" loadBlock (seek) fp=" + fp + " entCount=" + entCount + " prefixLen=" + prefix + " hasTerms?=" + hasTerms + " isFloor?=" + isFloor + " isLastInFloor=" + isLastInFloor + " leaf?=" + isLeafBlock);
    }
    }*/

    // stats
    int numBytes = ste.in.readVInt();
    if (statBytes.length < numBytes) {
    statBytes = new byte[ArrayUtil.oversize(numBytes, 1)];
    }
    ste.in.readBytes(statBytes, 0, numBytes);
    statsReader.reset(statBytes, 0, numBytes);
    statsSingletonRunLength = 0;
    metaDataUpto = 0;

    state.termBlockOrd = 0;
    nextEnt = 0;
    lastSubFP = -1;

    // TODO: we could skip this if !hasTerms; but
    // that's rare so won't help much
    // metadata
    numBytes = ste.in.readVInt();
    if (bytes.length < numBytes) {
    bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
    }
    ste.in.readBytes(bytes, 0, numBytes);
    bytesReader.reset(bytes, 0, numBytes);

    // Sub-blocks of a single floor block are always
    // written one after another -- tail recurse:
    fpEnd = ste.in.getFilePointer();
    // if (DEBUG) {
    // System.out.println(" fpEnd=" + fpEnd);
    // }
    }
As we know, Lucene splits an index into multiple files; here we only look at the inverted-index part.

The file Lucene uses to index terms is the Terms Index, with the .tip suffix. Postings information is stored in .doc, .pay and .pos files: .doc records the postings' doc IDs and term frequencies, .pay records payload (and offset) data, and .pos records position information. The Terms Dictionary has the .tim suffix; it is the link between terms and postings, storing each term together with file pointers into its postings data.

Overall, the Terms Index (.tip) lets you quickly locate the term you want inside the Terms Dictionary (.tim), along with its postings file pointers and the term's segment-level statistics.

Postings: in fact postings contain more than just DocIDs (the ordered sequence of document numbers is what we usually call DocIDs); they also include term frequencies, the term's positions within each document, and payload data.

So the inverted index involves at least five kinds of files; this post does not cover all of them.
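To see these statistics without a debugger, the public IndexReader API surfaces the same numbers that the .tip/.tim lookup traced above produces; a small sketch (index path and field name are assumptions):

import java.nio.file.Paths;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.FSDirectory;

public class TermStatsSketch {
    public static void main(String[] args) throws Exception {
        try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/home/dai/index")))) {
            Term t = new Term("contents", "am");
            // Both calls go through the Terms Dictionary machinery (SegmentTermsEnum) traced above.
            System.out.println("docFreq=" + reader.docFreq(t));
            System.out.println("totalTermFreq=" + reader.totalTermFreq(t));
        }
    }
}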

    Hashed and Hierarchical Timing Wheels:Data Structures for the Efficient Implementation of a Timer Facility


Conventional algorithms to implement an Operating System timer module take O(n) time to start or maintain a timer, where n is the number of outstanding timers: this is expensive for large n. This paper begins by exploring the relationship between timer algorithms, time flow mechanisms used in discrete event simulations, and sorting techniques. Next a timer algorithm for small timer intervals is presented that is similar to the timing wheel technique used in logic simulators. By using a circular buffer or timing wheel, it takes O(1) time to start, stop, and maintain timers within the range of the wheel.
Two extensions for larger values of the interval are described. In the first, the timer interval is hashed into a slot on the timing wheel. In the second, a hierarchy of timing wheels with different granularities is used to span a greater range of intervals. The performance of these two schemes and various implementation trade-offs are discussed.
(Conventional operating-system timer algorithms cost O(n) to start or maintain a timer, where n is the number of outstanding timers, which gets expensive for large n. The paper first relates timer algorithms to the time-flow mechanisms of discrete event simulation and to sorting, then presents a timing-wheel algorithm for small intervals: with a circular buffer, starting, stopping and maintaining a timer within the wheel's range takes O(1). Two extensions handle larger intervals: hashing the interval into a wheel slot, and a hierarchy of wheels with different granularities; the trade-offs of both schemes are discussed.)


    Our model of a timer module has four component
    routines:
    START_TIMER(Interval, Request_ID, Expiry_
    Action): The client calls this routine to start a timer
    that will expire after “Interval” units of time. The
    client supplies a Request_ID which is used to distinguish this timer from other timers that the client has
    outstanding. Finally, the client can specify what action must be taken on expiry: for instance, calling a
    client-specified routine, or setting an event flag.
    STOP_TIMER(Request_ID): This routine uses its
    knowledge of the client and Request_ID to locate the
    timer and stop it.
    PER_TICK_BOOKKEEPING: Let the granularity of
the timer be T units. Then every T units this routine
    checks whether any outstanding timers have expired;
    if so, it calls STOP_TIMER, which in turn calls the
    next routine.
    EXPIRY_PROCESSING: This routine does the Expiry_Action specified in the START_TIMER call.
    The first two routines are activated on client calls
    while the last two are invoked on timer ticks. The
    timer is often an external hardware clock.
    The following two performance measures can be used
    to choose between the various algorithms described
    in the rest of this paper. Both of them are parameterized by n, the average (or worst-case) number of
    outstanding timers.
(Our timer module model has four component routines. START_TIMER(Interval, Request_ID, Expiry_Action): the client calls this routine to start, i.e. register, a timer that expires after Interval units of time.)
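To make the O(1) claim concrete, here is a rough single-level hashed timing wheel sketch in Java (the class layout, sizes and names are my own assumptions, not from the paper): START_TIMER and STOP_TIMER are constant time, and each tick only touches one slot.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hashed timing wheel sketch: the interval is hashed (mod the wheel size) into a slot;
// timers longer than one revolution carry a "rounds" counter and are skipped until due.
final class HashedTimingWheel {
    private static final class Timer {
        final int id; int rounds; final Runnable action;
        Timer(int id, int rounds, Runnable action) { this.id = id; this.rounds = rounds; this.action = action; }
    }

    private final List<List<Timer>> slots = new ArrayList<>();
    private final Map<Integer, Timer> byId = new HashMap<>();
    private int cursor = 0;

    HashedTimingWheel(int wheelSize) {
        for (int i = 0; i < wheelSize; i++) slots.add(new ArrayList<>());
    }

    // START_TIMER: O(1), hash the interval into a slot relative to the current cursor.
    void startTimer(int intervalTicks, int requestId, Runnable expiryAction) {
        int slot = (cursor + intervalTicks) % slots.size();
        int rounds = (intervalTicks - 1) / slots.size();   // full revolutions to wait before firing
        Timer t = new Timer(requestId, rounds, expiryAction);
        slots.get(slot).add(t);
        byId.put(requestId, t);
    }

    // STOP_TIMER: O(1), forget the id; the slot entry is discarded lazily on its tick.
    void stopTimer(int requestId) { byId.remove(requestId); }

    // PER_TICK_BOOKKEEPING: advance one slot and run EXPIRY_PROCESSING for timers that are due.
    void tick() {
        cursor = (cursor + 1) % slots.size();
        slots.get(cursor).removeIf(t -> {
            if (!byId.containsKey(t.id)) return true;   // stopped earlier
            if (t.rounds-- > 0) return false;           // not due in this revolution yet
            byId.remove(t.id);
            t.action.run();                             // EXPIRY_PROCESSING
            return true;
        });
    }
}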



Pull the code:
git clone https://github.com/redis/redis.git
cd redis/
## build with debug symbols
make CFLAGS="-g -O0"
## create a working directory with one directory per node
mkdir rediscluster
cd rediscluster
mkdir 7000 7001 7002 7003 7004 7005


Create the configuration for each node:
    tree
    .
    ├── 7000
    ├── 7001
    ├── 7002
    ├── 7003
    ├── 7004
    └── 7005
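For reference, each of the six directories needs a minimal cluster-enabled redis.conf; something along these lines (directives taken from the standard cluster tutorial, only the port changes per directory), and once all instances are running they can be joined with redis-cli --cluster create:

# rediscluster/7000/redis.conf  (repeat per directory, changing only the port)
port 7000
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
appendonly yes

# after all six are up, form the cluster with one replica per master:
# src/redis-cli --cluster create 127.0.0.1:7000 127.0.0.1:7001 127.0.0.1:7002 \
#   127.0.0.1:7003 127.0.0.1:7004 127.0.0.1:7005 --cluster-replicas 1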


Then start all six instances like this:
    src/redis-server rediscluster/7001/redis.conf

Proactively taking a master offline (a manual failover) is a single command:
    CLUSTER FAILOVER

1 Types
string
list
set
zset
hash

2 The corresponding commands

The SET command lives in redis/src/t_string.c; string key expiry is not applied in real time.

list: LPUSH / LPOP; built from a ziplist and a doubly linked list.

hash: uses two hash tables, one holding the contents and one used for expansion (rehashing).

set: made up of an intset and a hashtable.

zset: ziplist + skiplist.

3 AOF / RDB

AOF is an append-only file of readable text; RDB is a full snapshot of the dataset.

4 Redis eviction

noeviction: if memory usage exceeds the maxmemory limit, commands that would allocate more memory (most write commands, with a few exceptions such as DEL) get an error reply
allkeys-lru: evict any key using LRU
volatile-lru: evict only keys with an expire set, using LRU
allkeys-random: evict random keys among all keys
volatile-random: evict random keys among those with an expire set
volatile-ttl: evict only keys with an expire set, preferring keys with a smaller TTL (time to live)
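The policy itself is chosen with the maxmemory / maxmemory-policy settings; for example (the values here are only illustrative):

# redis.conf
maxmemory 100mb
maxmemory-policy allkeys-lru

# or at runtime:
# redis-cli CONFIG SET maxmemory-policy allkeys-lru
# redis-cli CONFIG GET maxmemory-policy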

    5 HA

    Redis Cluster master-replica model


    In order to remain available when a subset of master nodes are failing or are not able to communicate with the majority of nodes, Redis Cluster uses a master-replica model where every hash slot has from 1 (the master itself) to N replicas (N-1 additional replica nodes).


    In our example cluster with nodes A, B, C, if node B fails the cluster is not able to continue, since we no longer have a way to serve hash slots in the range 5501-11000.


    However when the cluster is created (or at a later time) we add a replica node to every master, so that the final cluster is composed of A, B, C that are master nodes, and A1, B1, C1 that are replica nodes. This way, the system is able to continue if node B fails.


    Node B1 replicates B, and B fails, the cluster will promote node B1 as the new master and will continue to operate correctly.


    However, note that if nodes B and B1 fail at the same time, Redis Cluster is not able to continue to operate.


Failover

When a master loses contact with the cluster, its replicas request votes from the other masters (an election similar in spirit to Raft/Paxos leader election) and one of them is promoted to master.


Sharding

Keys are routed to different slots; the slot is computed as CRC16(key) % 16384.


Each shard owns a specific set of slots.
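For illustration, the slot computation can be reproduced with the CRC16 (XMODEM) variant Redis uses; a rough sketch that ignores {hash tags}:

final class ClusterSlot {
    // CRC16/XMODEM (poly 0x1021, init 0), the variant Redis Cluster uses for key hashing.
    static int crc16(byte[] data) {
        int crc = 0;
        for (byte b : data) {
            crc ^= (b & 0xFF) << 8;
            for (int i = 0; i < 8; i++) {
                crc = ((crc & 0x8000) != 0) ? ((crc << 1) ^ 0x1021) : (crc << 1);
                crc &= 0xFFFF;
            }
        }
        return crc;
    }

    // slot = CRC16(key) mod 16384; real clients also honor {hash tags}, omitted here.
    static int slot(String key) {
        return crc16(key.getBytes(java.nio.charset.StandardCharsets.UTF_8)) % 16384;
    }

    public static void main(String[] args) {
        // The cluster tutorial redirects key "foo" to slot 12182, so this should print 12182.
        System.out.println(slot("foo"));
    }
}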


Consistency

Redis Cluster does not guarantee strong consistency.


    Redis Cluster is not able to guarantee strong consistency. In practical terms this means that under certain conditions it is possible that Redis Cluster will lose writes that were acknowledged by the system to the client.


    The first reason why Redis Cluster can lose writes is because it uses asynchronous replication. This means that during writes the following happens:

Your client writes to the master B.
The master B replies OK to your client.
The master B propagates the write to its replicas B1, B2 and B3.

This is because the replication is asynchronous, so an acknowledged write can be lost if the master fails before a replica receives it.


Cache penetration and cache avalanche

Penetration: requests go straight through Redis to the backing store (for example, keys that are never cached keep hitting the database).
Avalanche: a large number of cached keys expire at the same time.


Rate limiting

    https://segmentfault.com/a/1190000040570911


Background

A table with about 17 million rows needed to be backfilled, so we took the ids in batches: [1, 1000), [1000, 2000), and so on.

The problem

The SQL is very simple:
update test as a set a.value=1 where id in ( 1 , 2 , 7 , 9.... 1000);
update test as a set a.value=1 where id in ( 1001 , 1002 , 1005 , ... 2000);

Each IN list contains roughly 100 ids; the ids are monotonically increasing and mostly contiguous.

The test environment was perfectly fine and very fast: with this SQL we could update more than 10,000 rows per second.

In production, however, the same update was extremely slow: updating 1,000 rows took more than 50 seconds.

Investigation

• Narrowing it down: after a lot of attempts we confirmed that this UPDATE statement itself was the slow part, and that it was fast in the test environment but very slow in production.

Try EXPLAIN:
    explain 
    update test as a set a.value=1 where id in ( 1 , 2 , 7 , 9....);

In production it shows:

    Using where; Using temporary

But in the test environment:

    Using where

We searched around and found a similar report:
https://bugs.mysql.com/bug.php?id=80424
Comparing versions:
Production: 5.7.9-log
Test: 5.7.22-log


Check how binlog rows are recorded:

    SELECT @@binlog_row_image

The result is:
    FULL

This bug was fixed in MySQL 5.7.15 and later, so the test environment is unaffected while production hits it.

The workaround

Upgrading the production MySQL was essentially not an option, and at roughly 10 rows per second the backfill would have taken about a week, so we tried many ways of rewriting the statement to sidestep this optimizer behavior. The rewrite that avoids the bug on the production version is to use a JOIN instead of IN:

    DESC
    UPDATE `test` a JOIN (
    SELECT id FROM test t WHERE `id` IN (516,517,518,519,520,521,522,523,524,525,526,527,528,529,530,531,533,532)
    ) t ON a.id = t.id
    SET a.isvisible = -1;

After the rewrite no temporary table is used:

    "id"	"select_type"	"table"	"partitions"	"type"	   "possible_keys"	"key"	"key_len"	"ref"	"rows"	"filtered"	"Extra"
    "1" "UPDATE" "a" \N "range" "PRIMARY" "PRIMARY" "4" \N "104" "100.00" "Using where"
    "1" "SIMPLE" "b" \N "eq_ref" "PRIMARY" "PRIMARY" "4" "a.id" "1" "100.00" "Using index"

Digging into the source afterwards

The patch attached to https://bugs.mysql.com/bug.php?id=80424 roughly points at the cause.

Why does it use a temporary table?

The sixth constructor argument decides whether a temporary table is needed, i.e. !using_filesort && (used_key_is_modified || order):

    Modification_plan plan(thd, MT_UPDATE, &qep_tab,
    used_index, limit,
    (!using_filesort && (used_key_is_modified || order)),
    using_filesort, used_key_is_modified, rows);

Looking at the declaration of the Modification_plan constructor:

    Modification_plan(THD *thd_arg,
    enum_mod_type mt, QEP_TAB *qep_tab,
    uint key_arg, ha_rows limit_arg, bool need_tmp_table_arg,
    bool need_sort_arg, bool used_key_is_modified_arg,
    ha_rows rows);

In this case used_key_is_modified is true, which is why the temporary table is created.


tcp_nodelay

     The solution to the small-packet problem
(The solution to the small-packet problem)

    Clearly an adaptive approach is desirable. One would expect a
    proposal for an adaptive inter-packet time limit based on the
    round-trip delay observed by TCP. While such a mechanism could
    certainly be implemented, it is unnecessary. A simple and
    elegant solution has been discovered.

    The solution is to inhibit the sending of new TCP segments when
    new outgoing data arrives from the user if any previously
    transmitted data on the connection remains unacknowledged. This
    inhibition is to be unconditional; no timers, tests for size of
    data received, or other conditions are required. Implementation
    typically requires one or two lines inside a TCP program.
(In other words: hold back new TCP segments when fresh data arrives from the user if any previously transmitted data on the connection is still unacknowledged. The inhibition is unconditional: no timers, no checks on the size of the data received, no other conditions; implementing it typically takes only one or two lines inside a TCP program.)


    At first glance, this solution seems to imply drastic changes in
    the behavior of TCP. This is not so. It all works out right in
    the end. Let us see why this is so.
(At first glance this seems to change TCP's behavior drastically, but it does not; it all works out in the end. Let's see why.)

    When a user process writes to a TCP connection, TCP receives some
    data. It may hold that data for future sending or may send a
    packet immediately. If it refrains from sending now, it will
    typically send the data later when an incoming packet arrives and
    changes the state of the system. The state changes in one of two
    ways; the incoming packet acknowledges old data the distant host
    has received, or announces the availability of buffer space in
    the distant host for new data. (This last is referred to as
    "updating the window"). Each time data arrives on a connec-
    tion, TCP must reexamine its current state and perhaps send some
    packets out. Thus, when we omit sending data on arrival from the
    user, we are simply deferring its transmission until the next
    message arrives from the distant host. A message must always
    arrive soon unless the connection was previously idle or communi-
    cations with the other end have been lost. In the first case,
    the idle connection, our scheme will result in a packet being
    sent whenever the user writes to the TCP connection. Thus we do
    not deadlock in the idle condition. In the second case, where
(When a user process writes to a TCP connection, the TCP stack receives the data and either holds on to it for later or sends a packet immediately.)



    RFC 896 Congestion Control in IP/TCP Internetworks 1/6/84

    the distant host has failed, sending more data is futile anyway.
    Note that we have done nothing to inhibit normal TCP retransmis-
    sion logic, so lost messages are not a problem.

    Examination of the behavior of this scheme under various condi-
    tions demonstrates that the scheme does work in all cases. The
    first case to examine is the one we wanted to solve, that of the
    character-oriented Telnet connection. Let us suppose that the
    user is sending TCP a new character every 200ms, and that the
    connection is via an Ethernet with a round-trip time including
    software processing of 50ms. Without any mechanism to prevent
    small-packet congestion, one packet will be sent for each charac-
    ter, and response will be optimal. Overhead will be 4000%, but
    this is acceptable on an Ethernet. The classic timer scheme,
    with a limit of 2 packets per second, will cause two or three
    characters to be sent per packet. Response will thus be degraded
    even though on a high-bandwidth Ethernet this is unnecessary.
    Overhead will drop to 1500%, but on an Ethernet this is a bad
    tradeoff. With our scheme, every character the user types will
    find TCP with an idle connection, and the character will be sent
    at once, just as in the no-control case. The user will see no
    visible delay. Thus, our scheme performs as well as the no-
    control scheme and provides better responsiveness than the timer
    scheme.

    The second case to examine is the same Telnet test but over a
    long-haul link with a 5-second round trip time. Without any
    mechanism to prevent small-packet congestion, 25 new packets
    would be sent in 5 seconds.* Overhead here is 4000%. With the
    classic timer scheme, and the same limit of 2 packets per second,
    there would still be 10 packets outstanding and contributing to
    congestion. Round-trip time will not be improved by sending many
    packets, of course; in general it will be worse since the packets
    will contend for line time. Overhead now drops to 1500%. With
    our scheme, however, the first character from the user would find
    an idle TCP connection and would be sent immediately. The next
    24 characters, arriving from the user at 200ms intervals, would
    be held pending a message from the distant host. When an ACK
    arrived for the first packet at the end of 5 seconds, a single
    packet with the 24 queued characters would be sent. Our scheme
    thus results in an overhead reduction to 320% with no penalty in
    response time. Response time will usually be improved with our
    scheme because packet overhead is reduced, here by a factor of
    4.7 over the classic timer scheme. Congestion will be reduced by
    this factor and round-trip delay will decrease sharply. For this
    ________
    * This problem is not seen in the pure ARPANET case because the
    IMPs will block the host when the count of packets
    outstanding becomes excessive, but in the case where a pure
    datagram local net (such as an Ethernet) or a pure datagram
    gateway (such as an ARPANET / MILNET gateway) is involved, it
    is possible to have large numbers of tiny packets
    outstanding.



    RFC 896 Congestion Control in IP/TCP Internetworks 1/6/84

    case, our scheme has a striking advantage over either of the
    other approaches.

    We use our scheme for all TCP connections, not just Telnet con-
    nections. Let us see what happens for a file transfer data con-
    nection using our technique. The two extreme cases will again be
    considered.

    As before, we first consider the Ethernet case. The user is now
    writing data to TCP in 512 byte blocks as fast as TCP will accept
    them. The user's first write to TCP will start things going; our
    first datagram will be 512+40 bytes or 552 bytes long. The
    user's second write to TCP will not cause a send but will cause
    the block to be buffered. Assume that the user fills up TCP's
    outgoing buffer area before the first ACK comes back. Then when
    the ACK comes in, all queued data up to the window size will be
    sent. From then on, the window will be kept full, as each ACK
    initiates a sending cycle and queued data is sent out. Thus,
    after a one round-trip time initial period when only one block is
    sent, our scheme settles down into a maximum-throughput condi-
    tion. The delay in startup is only 50ms on the Ethernet, so the
    startup transient is insignificant. All three schemes provide
    equivalent performance for this case.

    Finally, let us look at a file transfer over the 5-second round
    trip time connection. Again, only one packet will be sent until
    the first ACK comes back; the window will then be filled and kept
    full. Since the round-trip time is 5 seconds, only 512 bytes of
    data are transmitted in the first 5 seconds. Assuming a 2K win-
    dow, once the first ACK comes in, 2K of data will be sent and a
    steady rate of 2K per 5 seconds will be maintained thereafter.
    Only for this case is our scheme inferior to the timer scheme,
    and the difference is only in the startup transient; steady-state
    throughput is identical. The naive scheme and the timer scheme
    would both take 250 seconds to transmit a 100K byte file under
    the above conditions and our scheme would take 254 seconds, a
    difference of 1.6%.

    Thus, for all cases examined, our scheme provides at least 98% of
    the performance of both other schemes, and provides a dramatic
    improvement in Telnet performance over paths with long round trip
    times. We use our scheme in the Ford Aerospace Software
    Engineering Network, and are able to run screen editors over Eth-
    ernet and talk to distant TOPS-20 hosts with improved performance
    in both cases.
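On the application side, this behavior (Nagle's algorithm) can be switched off per socket when latency matters more than overhead; in Java that is the TCP_NODELAY socket option. A small sketch (the host and port are placeholders):

import java.io.OutputStream;
import java.net.Socket;

public class NoDelayExample {
    public static void main(String[] args) throws Exception {
        // Host and port are placeholders for illustration only.
        try (Socket socket = new Socket("example.com", 9000)) {
            socket.setTcpNoDelay(true);   // disable Nagle's algorithm: small writes go out immediately
            OutputStream out = socket.getOutputStream();
            out.write("x".getBytes());    // sent without waiting for the previous segment's ACK
            out.flush();
        }
    }
}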


Skip lists (skiplist)

    Binary trees can be used for representing abstract data
    types such as dictionaries and ordered lists. They work
    well when the elements are inserted in a random order.
(Binary trees can represent a dictionary or an ordered list, and they work well when elements are inserted in random order.)


Some sequences of operations, such as inserting the elements in order, produce degenerate data structures that perform very poorly.
(Some particular orders, for example inserting data that is already sorted, give very poor results.)
If it were possible to randomly permute the list of items to be inserted, trees would work well with high probability for any input sequence.
(If the data to insert could be randomly permuted, trees would perform well with high probability for any input.)
In most cases queries must be answered online, so randomly permuting the input is impractical.
Balanced tree algorithms rearrange the tree as operations are performed to maintain certain balance conditions and assure good performance.
(In most cases queries must be answered in real time, so permuting the input is not possible; balanced trees instead rearrange themselves during operations to maintain balance conditions and ensure good performance.)
Skip lists are a probabilistic alternative to balanced trees.
(Skip lists are an alternative to balanced trees.)
Skip lists are balanced by consulting a random number generator.
(A skip list is balanced by drawing random numbers.)
Although skip lists have bad worst-case performance, no input sequence consistently produces the worst-case performance (much like quicksort when the pivot element is chosen randomly).
(Skip lists do have bad cases, for example every randomly drawn level coming out the same, but no input sequence can reliably produce that worst case, because the levels are generated randomly.)
It is very unlikely a skip list data structure will be significantly unbalanced (e.g., for a dictionary of more than 250 elements, the chance that a search will take more than three times the expected time is less than one in a million). Skip lists have balance properties similar to that of search trees built by random insertions, yet do not require insertions to be random.
(It is very unlikely for a skip list to become significantly unbalanced; its balance properties are similar to those of a search tree built by random insertions, yet the insertions themselves do not need to be random.)
It is easier to balance a data structure probabilistically than to explicitly maintain the balance.
(Balancing a data structure probabilistically is simpler than explicitly maintaining its balance.)
For many applications, skip lists are a more natural representation than trees, and they lead to simpler algorithms.
(For many applications skip lists are a more natural representation than trees, and they lead to simpler algorithms.)
The simplicity of skip list algorithms makes them easier to implement and provides significant constant factor speed improvements over balanced tree and self-adjusting tree algorithms.
(Their simplicity makes skip lists easier to implement, with significant constant-factor speedups over balanced trees and self-adjusting trees.)
Skip lists are also very space efficient.
They can easily be configured to require an average of 1⅓ pointers per element (or even less) and do not require balance or priority information to be stored with each node.
(They can easily be configured to use an average of about 1⅓ pointers per element, or even fewer, and do not need balance or priority information stored in each node.)


SKIP LISTS

We might need to examine every node of the list when searching a linked list (Figure 1a). If the list is stored in sorted order and every other node of the list also has a pointer to the node two ahead of it in the list (Figure 1b), we have to examine no more than ⌈n/2⌉ + 1 nodes (where n is the length of the list). Also giving every fourth node a pointer four ahead (Figure 1c) requires that no more than ⌈n/4⌉ + 2 nodes be examined. If every (2^i)th node has a pointer 2^i nodes ahead (Figure 1d), the number of nodes that must be examined can be reduced to ⌈log2 n⌉ while only doubling the number of pointers. This data structure could be used for fast searching, but insertion and deletion would be impractical.

A node that has k forward pointers is called a level k node. If every (2^i)th node has a pointer 2^i nodes ahead, then levels of nodes are distributed in a simple pattern: 50 percent are level 1, 25 percent are level 2, 12.5 percent are level 3 and so on. What would happen if the levels of nodes were chosen randomly, but in the same proportions (e.g., as in Figure 1e)? A node's ith forward pointer, instead of pointing 2^(i-1) nodes ahead, points to the next node of level i or higher. Insertions or deletions would require only local modifications; the level of a node, chosen randomly when the node is inserted, need never change. Some arrangements of levels would give poor execution times, but we will see that such arrangements are rare. Because these data structures are linked lists with extra pointers that skip over intermediate nodes, they are called skip lists.
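A minimal sketch of the two ideas above — choosing a node's level with a fair coin (50% level 1, 25% level 2, ...) and searching by dropping down a level whenever the next key is too large. This is my own illustration in C, not code from the paper or the post:

#include <stdio.h>
#include <stdlib.h>

#define MAX_LEVEL 16

typedef struct node {
    int key;
    struct node *forward[MAX_LEVEL];   /* forward[i] points to the next node of level >= i+1 */
} node;

/* flip a fair coin until it comes up tails: 50% level 1, 25% level 2, 12.5% level 3 ... */
static int random_level(void) {
    int level = 1;
    while (level < MAX_LEVEL && (rand() & 1))
        level++;
    return level;
}

/* start at the highest level, move right while the next key is still smaller,
   then drop down one level; finish on level 0 (the plain linked list). */
static node *skiplist_search(node *header, int level, int key) {
    node *x = header;
    for (int i = level - 1; i >= 0; i--)
        while (x->forward[i] && x->forward[i]->key < key)
            x = x->forward[i];
    x = x->forward[0];
    return (x && x->key == key) ? x : NULL;
}

int main(void) {
    node header = { 0, { NULL } };
    printf("random level: %d\n", random_level());
    printf("search in empty list: %p\n", (void *)skiplist_search(&header, MAX_LEVEL, 42));
    return 0;
}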


How does MongoDB's WiredTiger organize its key-value structures?
I still haven't figured this out.

Call stack:

    Thread 41 "conn1" hit Breakpoint 5, __wt_btcur_insert (cbt=cbt@entry=0x555560bd2000) at src/third_party/wiredtiger/src/btree/bt_cursor.c:763
    763 {
    (gdb) bt
    #0 __wt_btcur_insert (cbt=cbt@entry=0x555560bd2000) at src/third_party/wiredtiger/src/btree/bt_cursor.c:763
    #1 0x0000555556d6149a in __curfile_insert (cursor=0x555560bd2000) at src/third_party/wiredtiger/src/cursor/cur_file.c:266
    #2 0x0000555556cd3ef4 in mongo::wiredTigerCursorInsert (opCtx=opCtx@entry=0x555560af8180, cursor=cursor@entry=0x555560bd2000)
    at src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp:39
    #3 0x0000555556d131bd in mongo::WiredTigerRecordStore::_insertRecords (this=0x55555bfddc00, opCtx=0x555560af8180, records=<optimized out>, timestamps=0x55555c01a478, nRecords=1)
    at src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp:1319
    #4 0x0000555556d13ae7 in mongo::WiredTigerRecordStore::insertRecords (this=<optimized out>, opCtx=<optimized out>, records=<optimized out>, timestamps=...)
    at /usr/include/c++/9/bits/stl_vector.h:915
    #5 0x0000555557bf9972 in mongo::CollectionImpl::_insertDocuments (this=0x55555bfe1410, opCtx=0x555560af8180, begin=..., end=..., opDebug=0x555560b40a50, fromMigrate=false)
    at /usr/include/c++/9/bits/unique_ptr.h:360
    #6 0x0000555557bfa333 in mongo::CollectionImpl::insertDocuments (this=this@entry=0x55555bfe1410, opCtx=opCtx@entry=0x555560af8180, begin=begin@entry=
    {stmtIds = std::vector of length 1, capacity 1 = {-1}, oplogSlot = {static kTermFieldName = {_data = 0x555559e64ab2 "t", _size = 1}, static kTimestampFieldName = {_data = 0x555559e5c074 "ts", _size = 2}, static kUninitializedTerm = -1, static kInitialTerm = 0, _timestamp = {static kAllowUnstableCheckpointsSentinel = {static kAllowUnstableCheckpointsSentinel = <same as static member of an already seen type>, i = 1, secs = 0}, i = 0, secs = 0}, _term = -1}, doc = {static kMinBSONLength = 5 '\005', static kEmptyObjectPrototype = "\005\000\000\000", _objdata = 0x55556036f92b "-", _ownedBuffer = {_buffer = {_holder = {px = 0x55556036f900}, static kHolderSize = 8}}}}, end=end@entry=
    {stmtIds = std::vector of length -28, capacity 23456275625363 = {<error reading variable>, opDebug=0x555560b40a50, fromMigrate=false)
    at src/mongo/db/catalog/collection_impl.cpp:663
    #7 0x0000555557a0a1ae in mongo::write_ops_exec::(anonymous namespace)::insertDocuments (opCtx=0x555560af8180, collection=..., begin=..., end=
    {stmtIds = std::vector of length -28, capacity 23456275625363 = {<error reading variable>, fromMigrate=<optimized out>) at /usr/include/c++/9/bits/stl_iterator.h:871
    #8 0x0000555557a0a631 in mongo::write_ops_exec::(anonymous namespace)::<lambda()>::operator()(void) const (__closure=0x7fffe5962a30) at src/mongo/db/catalog_raii.h:151
    #9 0x0000555557a0ac1b in mongo::writeConflictRetry<mongo::write_ops_exec::(anonymous namespace)::insertBatchAndHandleErrors(mongo::OperationContext*, const mongo::write_ops::InsertCommandRequest&, std::vector<mongo::InsertStatement>&, mongo::write_ops_exec::(anonymous namespace)::LastOpFixer*, mongo::write_ops_exec::WriteResult*, mongo::OperationSource)::<lambda()> > (f=..., ns=..., opStr=..., opCtx=0x555560af8180) at /usr/include/c++/9/bits/stl_iterator.h:806
    #10 mongo::write_ops_exec::(anonymous namespace)::insertBatchAndHandleErrors (source=<optimized out>, out=<optimized out>, lastOpFixer=<optimized out>,
    batch=std::vector of length 1, capacity 1 = {...}, wholeOp=..., opCtx=<optimized out>) at src/mongo/db/ops/write_ops_exec.cpp:502
    #11 mongo::write_ops_exec::performInserts (opCtx=<optimized out>, opCtx@entry=0x555560af8180, wholeOp=..., source=@0x7fffe5962e00: mongo::kStandard)
    at src/mongo/db/ops/write_ops_exec.cpp:655
    #12 0x000055555791d28e in mongo::(anonymous namespace)::CmdInsert::Invocation::typedRun (this=0x555560ba0000, opCtx=0x555560af8180) at src/mongo/db/commands.h:1173
    #13 0x000055555791e8a0 in mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::_callTypedRun (opCtx=<optimized out>, this=<optimized out>)
    at src/mongo/db/commands.h:1255
    #14 mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::_runImpl (reply=0x555561392000, opCtx=<optimized out>, this=<optimized out>)
    at src/mongo/db/commands.h:1256
    #15 mongo::TypedCommand<mongo::(anonymous namespace)::CmdInsert>::InvocationBase::run (this=<optimized out>, opCtx=<optimized out>, reply=0x555561392000)
    at src/mongo/db/commands.h:1261
    #16 0x0000555558791662 in mongo::CommandHelpers::runCommandInvocation (opCtx=0x555560af8180, request=..., invocation=0x555560ba0000, response=0x555561392000)
    at src/mongo/db/commands.cpp:200
    #17 0x0000555558797d73 in mongo::CommandHelpers::<lambda()>::operator() (__closure=0x7fffe5963180) at src/mongo/db/commands.cpp:184
    #18 mongo::makeReadyFutureWith<mongo::CommandHelpers::runCommandInvocation(std::shared_ptr<mongo::RequestExecutionContext>, std::shared_ptr<mongo::CommandInvocation>, mongo::transport::ServiceExecutor::ThreadingModel)::<lambda()> > (func=...) at src/mongo/util/future.h:1208
    #19 mongo::CommandHelpers::runCommandInvocation (rec=std::shared_ptr<class mongo::RequestExecutionContext> (use count 11, weak count 0) = {...},
    invocation=std::shared_ptr<class mongo::CommandInvocation> (use count 3, weak count 0) = {...}, threadingModel=<optimized out>) at src/mongo/db/commands.cpp:185
    #20 0x0000555556c48367 in mongo::(anonymous namespace)::runCommandInvocation (rec=std::shared_ptr<class mongo::RequestExecutionContext> (empty) = {...},
    invocation=std::shared_ptr<class mongo::CommandInvocation> (empty) = {...}) at /usr/include/c++/9/bits/shared_ptr_base.h:756
    --Type <RET> for more, q to quit, c to continue without paging--
    #21 0x0000555556c5c389 in mongo::(anonymous namespace)::InvokeCommand::<lambda()>::operator() (__closure=<optimized out>) at /usr/include/c++/9/bits/shared_ptr_base.h:756
    #22 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::InvokeCommand::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
    #23 mongo::(anonymous namespace)::InvokeCommand::run (this=0x55555c01a4a0) at src/mongo/db/service_entry_point_common.cpp:842
    #24 mongo::(anonymous namespace)::RunCommandImpl::<lambda(auto:78*)>::operator()<mongo::(anonymous namespace)::InvokeCommand> (__closure=<optimized out>, path=0x55555c01a4a0)
    at src/mongo/db/service_entry_point_common.cpp:1188
    #25 mongo::future_util::AsyncState<mongo::(anonymous namespace)::InvokeCommand>::<lambda()>::operator() (this=<optimized out>, this=<optimized out>)
    at src/mongo/util/future_util.h:742
    #26 mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::RunCommandImpl::_runCommand()::<lambda(auto:78*)>; State = mongo::(anonymous namespace)::InvokeCommand]::<lambda()> > (func=...) at src/mongo/util/future.h:1211
    #27 mongo::future_util::AsyncState<mongo::(anonymous namespace)::InvokeCommand>::thenWithState<mongo::(anonymous namespace)::RunCommandImpl::_runCommand()::<lambda(auto:78*)> > (
    launcher=..., this=<optimized out>) at src/mongo/util/future_util.h:747
    #28 mongo::(anonymous namespace)::RunCommandImpl::_runCommand (this=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:1188
    #29 0x0000555556c5cc32 in mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern::_runCommandWithFailPoint (this=0x555560ba00e0)
    at src/mongo/db/service_entry_point_common.cpp:1299
    #30 0x0000555556c5d1c3 in mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern::_runImpl (this=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:1219
    #31 0x0000555556c4b9ad in mongo::(anonymous namespace)::RunCommandImpl::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:730
    #32 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::RunCommandImpl::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
    #33 mongo::(anonymous namespace)::RunCommandImpl::run (this=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:728
    #34 0x0000555556c4f5b2 in mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::<lambda(auto:79*)>::operator()<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern> (__closure=<optimized out>, runner=0x555560ba00e0) at src/mongo/db/service_entry_point_common.cpp:1651
    #35 mongo::future_util::AsyncState<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern>::<lambda()>::operator() (this=<optimized out>, this=<optimized out>)
    at src/mongo/util/future_util.h:742
    #36 mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec()::<lambda()>::<lambda(auto:79*)>; State = mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern]::<lambda()> > (func=...) at src/mongo/util/future.h:1211
    #37 mongo::future_util::AsyncState<mongo::(anonymous namespace)::RunCommandAndWaitForWriteConcern>::thenWithState<mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec()::<lambda()>::<lambda(auto:79*)> > (launcher=..., this=<optimized out>) at src/mongo/util/future_util.h:747
    #38 mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::operator() (__closure=<synthetic pointer>) at src/mongo/db/service_entry_point_common.cpp:1651
    #39 mongo::(anonymous namespace)::ExecCommandDatabase::_commandExec (this=0x555560b57800) at src/mongo/db/service_entry_point_common.cpp:1658
    #40 0x0000555556c58516 in mongo::(anonymous namespace)::ExecCommandDatabase::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:625
    #41 mongo::makeReadyFutureWith<mongo::(anonymous namespace)::ExecCommandDatabase::run()::<lambda()> > (func=...) at src/mongo/util/future.h:1211
    #42 mongo::(anonymous namespace)::ExecCommandDatabase::run (this=0x555560b57800) at src/mongo/db/service_entry_point_common.cpp:623
    #43 mongo::(anonymous namespace)::<lambda()>::<lambda(auto:81*)>::operator()<mongo::(anonymous namespace)::ExecCommandDatabase> (__closure=<optimized out>, runner=0x555560b57800)
    at src/mongo/db/service_entry_point_common.cpp:1880
    #44 mongo::future_util::AsyncState<mongo::(anonymous namespace)::ExecCommandDatabase>::<lambda()>::operator()(void) const (this=<optimized out>, this=<optimized out>)
    at src/mongo/util/future_util.h:742
    #45 0x0000555556c58be2 in mongo::makeReadyFutureWith<mongo::future_util::AsyncState<State>::thenWithState(Launcher&&) && [with Launcher = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> mutable::<lambda(auto:81*)>; State = mongo::(anonymous namespace)::ExecCommandDatabase]::<lambda()> > (func=...) at src/mongo/util/future.h:1206
    #46 mongo::future_util::AsyncState<mongo::(anonymous namespace)::ExecCommandDatabase>::thenWithState<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> mutable::<lambda(auto:81*)> > (launcher=..., this=0x7fffe59639c0) at src/mongo/util/future_util.h:747
    #47 mongo::(anonymous namespace)::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/db/service_entry_point_common.cpp:1880
    #48 mongo::future_details::call<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&> (func=...)
    at src/mongo/util/future_impl.h:255
    --Type <RET> for more, q to quit, c to continue without paging--
    #49 mongo::future_details::throwingCall<mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&, mongo::future_details::FakeVoid> (func=...) at src/mongo/util/future_impl.h:308
    #50 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>::operator() (output=0x555560b43200, input=<optimized out>, this=<optimized out>) at src/mongo/util/future_impl.h:935
    #51 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::SharedStateBase*)>::operator() (ssb=<optimized out>, this=<optimized out>)
    at src/mongo/util/future_impl.h:1257
    #52 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::callRegularVoid<mongo::future_details::FutureImpl<T>::makeContinuation(OnReady&&) [with Result = void; OnReady = mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::SharedStateBase*)> > (args#0=<optimized out>, f=..., isVoid=...)
    at src/mongo/util/functional.h:145
    #53 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::SpecificImpl::call(mongo::future_details::SharedStateBase *&&) (this=<optimized out>,
    args#0=<optimized out>) at src/mongo/util/functional.h:159
    #54 0x0000555556c14b27 in mongo::unique_function<void (mongo::future_details::SharedStateBase*)>::operator()(mongo::future_details::SharedStateBase*) const (args#0=<optimized out>,
    this=0x555560b43818) at src/mongo/util/invariant.h:66
    #55 mongo::future_details::SharedStateBase::transitionToFinished (this=0x555560b43800) at src/mongo/util/future_impl.h:441
    #56 0x0000555556c5fcd2 in mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>::emplaceValue<mongo::future_details::FakeVoid> (this=<optimized out>)
    at /usr/include/c++/9/new:174
    #57 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}::operator()(mongo::future_details::FakeVoid&&) const (this=<optimized out>, val=...) at src/mongo/util/future_impl.h:1146
    #58 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}>(mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}&&) (notReady=...,
    fail=..., success=..., this=<optimized out>) at src/mongo/util/future_impl.h:1191
    #59 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}>(mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::future_details::FakeVoid&&)#1}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda(mongo::Status&&)#2}&&, mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) &&::{lambda()#3}&&) (
    this=<optimized out>, success=..., fail=..., notReady=...) at src/mongo/util/future_impl.h:1182
    #60 0x0000555556c520ac in mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::propagateResultTo(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*) && (output=<optimized out>, this=0x7fffe5963b40) at src/mongo/util/future_impl.h:1143
    #61 mongo::SemiFuture<void>::propagateResultTo<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*&>(mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>*&) && (arg=<synthetic pointer>: <optimized out>, this=0x7fffe5963b40) at src/mongo/util/future.h:285
    #62 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>::operator() (output=0x555560b43800, input=<optimized out>, this=<optimized out>) at src/mongo/util/future_impl.h:935
    #63 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::SharedStateBase*)>::operator() (ssb=<optimized out>, this=<optimized out>)
    --Type <RET> for more, q to quit, c to continue without paging--
    at src/mongo/util/future_impl.h:1257
    #64 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::callRegularVoid<mongo::future_details::FutureImpl<T>::makeContinuation(OnReady&&) [with Result = void; OnReady = mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::executeCommand(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()>::<lambda(mongo::future_details::SharedState<mongo::future_details::FakeVoid>*, mongo::future_details::SharedState<void>*)>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::SharedStateBase*)> > (args#0=<optimized out>, f=..., isVoid=...)
    at src/mongo/util/functional.h:145
    #65 mongo::unique_function<void(mongo::future_details::SharedStateBase*)>::SpecificImpl::call(mongo::future_details::SharedStateBase *&&) (this=<optimized out>,
    args#0=<optimized out>) at src/mongo/util/functional.h:159
    #66 0x0000555556c14b27 in mongo::unique_function<void (mongo::future_details::SharedStateBase*)>::operator()(mongo::future_details::SharedStateBase*) const (args#0=<optimized out>,
    this=0x555560b42d18) at src/mongo/util/invariant.h:66
    #67 mongo::future_details::SharedStateBase::transitionToFinished (this=0x555560b42d00) at src/mongo/util/future_impl.h:441
    #68 0x0000555556c59141 in mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid>::emplaceValue<>() (this=0x555560b42d00) at /usr/include/c++/9/new:174
    #69 mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}::operator()(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&) const (this=<optimized out>, sharedState=<synthetic pointer>) at src/mongo/util/future.h:854
    #70 mongo::Promise<void>::setImpl<mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}>(mongo::Promise<void>::emplaceValue<, 0>()::{lambda(boost::intrusive_ptr<mongo::future_details::SharedStateImpl<mongo::future_details::FakeVoid> >&&)#1}&&) (doSet=...,
    this=0x7fffe5963ce0) at src/mongo/util/future.h:895
    #71 mongo::Promise<void>::emplaceValue<, 0>() (this=0x7fffe5963ce0) at src/mongo/util/future.h:853
    #72 mongo::(anonymous namespace)::executeCommand (execContext=...) at src/mongo/db/service_entry_point_common.cpp:1892
    #73 0x0000555556c59cbf in mongo::(anonymous namespace)::<lambda()>::operator() (__closure=<optimized out>) at /usr/include/c++/9/bits/shared_ptr_base.h:756
    #74 mongo::future_details::call<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&> (
    func=...) at src/mongo/util/future_impl.h:255
    #75 mongo::future_details::throwingCall<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>&, mongo::future_details::FakeVoid> (func=...) at src/mongo/util/future_impl.h:308
    #76 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::FakeVoid&&)>::operator()(mongo::future_details::FakeVoid &&) (val=...,
    this=<optimized out>) at src/mongo/util/future_impl.h:917
    #77 0x0000555556c59e8a in mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::FakeVoid&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::Status&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()> > (fail=..., notReady=..., success=..., this=0x7fffe5964110) at src/third_party/boost/boost/optional/detail/optional_aligned_storage.hpp:64
    #78 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::then<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> > (func=..., this=0x7fffe5964110) at src/mongo/util/future_impl.h:940
    #79 mongo::Future<void>::then<mongo::(anonymous namespace)::receivedCommands(std::shared_ptr<mongo::(anonymous namespace)::HandleRequest::ExecutionContext>)::<lambda()> > (
    func=..., this=0x7fffe5964110) at src/mongo/util/future.h:405
    #80 mongo::(anonymous namespace)::receivedCommands (
    execContext=std::shared_ptr<class mongo::(anonymous namespace)::HandleRequest::ExecutionContext> (use count 11, weak count 0) = {...})
    at src/mongo/db/service_entry_point_common.cpp:1939
    #81 0x0000555556c5b130 in mongo::(anonymous namespace)::CommandOpRunner::run (this=<optimized out>) at /usr/include/c++/9/ext/atomicity.h:96
    #82 0x0000555556c54f9f in mongo::ServiceEntryPointCommon::handleRequest (opCtx=opCtx@entry=0x555560af8180, m=...,
    behaviors=std::unique_ptr<const class mongo::ServiceEntryPointCommon::Hooks> = {...}) at src/mongo/db/service_entry_point_common.cpp:2441
    #83 0x0000555556c41514 in mongo::ServiceEntryPointMongod::handleRequest (this=<optimized out>, opCtx=0x555560af8180, m=...) at /usr/include/c++/9/bits/move.h:74
    --Type <RET> for more, q to quit, c to continue without paging--
    #84 0x0000555556cabe0a in mongo::transport::ServiceStateMachine::Impl::processMessage (this=0x555560b81090) at src/mongo/transport/service_state_machine.cpp:466
    #85 0x0000555556caf176 in mongo::transport::ServiceStateMachine::Impl::<lambda()>::operator() (__closure=<optimized out>) at src/mongo/transport/service_state_machine.cpp:559
    #86 mongo::future_details::call<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>&> (func=...) at src/mongo/util/future_impl.h:255
    #87 mongo::future_details::throwingCall<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>&, mongo::future_details::FakeVoid> (func=...)
    at src/mongo/util/future_impl.h:308
    #88 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::<lambda(mongo::future_details::FakeVoid&&)>::operator() (this=<optimized out>, val=...)
    at src/mongo/util/future_impl.h:917
    #89 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::generalImpl<mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::future_details::FakeVoid&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda(mongo::Status&&)>, mongo::future_details::FutureImpl<T>::then(Func&&) && [with Func = mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()>; T = mongo::future_details::FakeVoid]::<lambda()> > (fail=..., notReady=..., success=..., this=0x7fffe5964780) at src/mongo/util/future_impl.h:1184
    #90 mongo::future_details::FutureImpl<mongo::future_details::FakeVoid>::then<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()> > (
    func=..., this=0x7fffe5964780) at src/mongo/util/future_impl.h:940
    #91 mongo::Future<void>::then<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda()> > (func=..., this=0x7fffe5964780)
    at src/mongo/util/future.h:405
    #92 mongo::transport::ServiceStateMachine::Impl::startNewLoop (execStatus=..., this=0x555560b81090) at src/mongo/transport/service_state_machine.cpp:559
    #93 mongo::transport::ServiceStateMachine::Impl::startNewLoop (this=0x555560b81090, execStatus=...) at src/mongo/transport/service_state_machine.cpp:546
    #94 0x0000555556caf8e4 in mongo::transport::ServiceStateMachine::Impl::<lambda(mongo::Status)>::<lambda(mongo::Status)>::<lambda()>::operator() (__closure=<synthetic pointer>,
    __closure=<synthetic pointer>) at src/mongo/transport/service_state_machine.cpp:588
    #95 mongo::ClientStrand::run<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda(mongo::Status)>::<lambda(mongo::Status)>::<lambda()> > (
    task=..., this=<optimized out>) at src/mongo/db/client_strand.h:165
    #96 mongo::transport::ServiceStateMachine::Impl::<lambda(mongo::Status)>::<lambda(mongo::Status)>::operator() (__closure=<optimized out>, execStatus=...)
    at src/mongo/transport/service_state_machine.cpp:588
    #97 mongo::unique_function<void(mongo::Status)>::callRegularVoid<mongo::transport::ServiceStateMachine::Impl::startNewLoop(const mongo::Status&)::<lambda(mongo::Status)>::<lambda(mongo::Status)> > (args#0=..., f=..., isVoid=...) at src/mongo/util/functional.h:145
    #98 mongo::unique_function<void(mongo::Status)>::SpecificImpl::call(mongo::Status &&) (this=<optimized out>, args#0=...) at src/mongo/util/functional.h:159
    #99 0x00005555592c9065 in mongo::unique_function<void (mongo::Status)>::operator()(mongo::Status) const (args#0=..., this=<optimized out>) at src/mongo/util/invariant.h:66
    #100 mongo::transport::ServiceExecutorSynchronous::<lambda(mongo::Status)>::operator() (status=..., __closure=<optimized out>)
    at src/mongo/transport/service_executor_synchronous.cpp:163
    #101 mongo::unique_function<void(mongo::Status)>::callRegularVoid<mongo::transport::ServiceExecutorSynchronous::runOnDataAvailable(const SessionHandle&, mongo::OutOfLineExecutor::Task)::<lambda(mongo::Status)> > (args#0=..., f=..., isVoid=...) at src/mongo/util/functional.h:145
    #102 mongo::unique_function<void(mongo::Status)>::SpecificImpl::call(mongo::Status &&) (this=<optimized out>, args#0=...) at src/mongo/util/functional.h:159
    #103 0x000055555741b9d8 in mongo::unique_function<void (mongo::Status)>::operator()(mongo::Status) const (args#0=..., this=<optimized out>) at src/mongo/util/invariant.h:66
    #104 mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}::operator()() (__closure=<optimized out>)
    at src/mongo/transport/service_executor.h:111
    #105 mongo::unique_function<void ()>::callRegularVoid<mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}>(std::integral_constant<bool, true>, mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}&) (f=..., isVoid=...) at src/mongo/util/functional.h:145
    #106 mongo::unique_function<void ()>::makeImpl<mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}>(mongo::transport::ServiceExecutor::schedule(mongo::unique_function<void (mongo::Status)>)::{lambda()#1}&&)::SpecificImpl::call() (this=<optimized out>) at src/mongo/util/functional.h:159
    #107 0x00005555592c923f in mongo::unique_function<void ()>::operator()() const (this=<optimized out>) at src/mongo/util/invariant.h:66
    #108 mongo::transport::ServiceExecutorSynchronous::<lambda()>::operator() (__closure=0x55556037a1a8) at src/mongo/transport/service_executor_synchronous.cpp:131
    #109 mongo::unique_function<void()>::callRegularVoid<mongo::transport::ServiceExecutorSynchronous::scheduleTask(mongo::transport::ServiceExecutor::Task, mongo::transport::ServiceExec--Type <RET> for more, q to quit, c to continue without paging--
    utor::ScheduleFlags)::<lambda()> > (f=..., isVoid=...) at src/mongo/util/functional.h:145
    #110 mongo::unique_function<void()>::SpecificImpl::call(void) (this=0x55556037a1a0) at src/mongo/util/functional.h:159
    #111 0x00005555592cdc28 in mongo::unique_function<void ()>::operator()() const (this=0x555560b58f58) at src/mongo/util/invariant.h:66
    #112 mongo::<lambda()>::operator() (__closure=0x555560b58f48) at src/mongo/transport/service_executor_utils.cpp:111
    #113 mongo::unique_function<void()>::callRegularVoid<mongo::launchServiceWorkerThread(mongo::unique_function<void()>)::<lambda()> > (f=..., isVoid=...)
    at src/mongo/util/functional.h:145
    #114 mongo::unique_function<void()>::SpecificImpl::call(void) (this=0x555560b58f40) at src/mongo/util/functional.h:159
    #115 0x00005555592cdca1 in mongo::unique_function<void ()>::operator()() const (this=0x55555c01a4b8) at src/mongo/util/invariant.h:66
    #116 mongo::(anonymous namespace)::runFunc (ctx=0x55555c01a4b8) at src/mongo/transport/service_executor_utils.cpp:64
    #117 0x00007ffff7b8a609 in start_thread (arg=<optimized out>) at pthread_create.c:477
    #118 0x00007ffff777f293 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95

What is Hoare logic?
A Hoare triple consists of three parts (a small example follows the list):
• ϕ is called the precondition,
• P is the program fragment,
• ψ is called the postcondition.
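A tiny illustration of one triple, {x == n} x = x + 1 {x == n + 1}, written as runtime assertions in C. This is my own example, not taken from the post:

#include <assert.h>

int increment(int x) {
    int n = x;          /* name the initial value so the conditions can mention it */
    assert(x == n);     /* precondition  ϕ: x == n */
    x = x + 1;          /* program fragment P */
    assert(x == n + 1); /* postcondition ψ: x == n + 1 */
    return x;
}

int main(void) { return increment(41) == 42 ? 0 : 1; }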

Related reading

• https://arxiv.org/pdf/1211.4470.pdf
• C. A. R. Hoare. Proof of correctness of data representations. Acta Informatica, 1:271–281, 1972.
• Bertrand Meyer. Object-oriented Software Construction. Prentice Hall, 2nd edition, 1997. First ed.: 1988.

On x86_64 the call stack grows from high addresses toward low addresses, and the %rbp register gives us two key pieces of information (see the sketch after the lists):

• 0(%rbp), i.e. the memory the %rbp register points at, holds the saved %rbp of the previous stack frame;
• 8(%rbp) holds the return address — that is, the address in the code segment where execution resumes in the caller.

In other words, through %rbp we can recover:

• 8(%rbp): where the caller's code continues (the return address);
• (%rbp): the base of the caller's stack frame.
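A rough sketch of walking exactly those two slots, frame by frame. This is my own example, and it assumes the compiler really keeps %rbp as a frame pointer (build with -O0 or -fno-omit-frame-pointer); with aggressive optimization the chain may not exist:

#include <stdio.h>

static void walk_frames(void) {
    void **rbp = __builtin_frame_address(0);       /* current %rbp */
    int depth = 0;
    while (rbp && rbp[0] && depth++ < 8) {
        void *saved_rbp = rbp[0];                  /* 0(%rbp): previous frame's %rbp */
        void *ret_addr  = rbp[1];                  /* 8(%rbp): return address in the caller */
        printf("frame %p -> return address %p\n", (void *)rbp, ret_addr);
        rbp = (void **)saved_rbp;                  /* follow the chain upward */
    }
}

static void inner(void) { walk_frames(); }
static void outer(void) { inner(); }

int main(void) {
    outer();
    return 0;
}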


VXLAN is a protocol that carries Layer 2 frames on top of a Layer 3 (UDP/IP) transport; the VTEP is the endpoint that decapsulates the inner Layer 2 frame.


MySQL packets:

A MySQL packet has two parts: a header and a payload.

Header

The header is four bytes. The first three bytes give the length of the payload, so a payload can be at most 2^24 - 1 bytes; the last byte is a sequence number, similar to TCP's, that starts at 0 and increments with each packet, identifying which packet in the exchange this is.
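A minimal sketch of decoding that 4-byte header (3-byte little-endian payload length plus a 1-byte sequence id). This is my own illustration, not code from the post:

#include <stdint.h>
#include <stdio.h>

struct mysql_packet_header {
    uint32_t payload_length;   /* 0 .. 2^24 - 1 */
    uint8_t  sequence_id;
};

static struct mysql_packet_header parse_header(const uint8_t b[4]) {
    struct mysql_packet_header h;
    h.payload_length = (uint32_t)b[0] | ((uint32_t)b[1] << 8) | ((uint32_t)b[2] << 16);
    h.sequence_id = b[3];
    return h;
}

int main(void) {
    const uint8_t raw[4] = {0x2d, 0x00, 0x00, 0x00};   /* 45-byte payload, sequence 0 */
    struct mysql_packet_header h = parse_header(raw);
    printf("len=%u seq=%u\n", h.payload_length, h.sequence_id);
    return 0;
}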

Payload

The payload is the actual message body.

MySQL handshake

After the TCP three-way handshake the transport-layer connection is established, so how does login work?
See the handshake documentation for the authentication methods.
For example, when the authentication plugin is mysql_native_password, the first packet is sent by the server and carries a 20-byte random scramble; the client then applies several rounds of SHA-1 to the user's password and sends the result back to MySQL (a sketch of the scramble computation follows).
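A sketch of the commonly documented mysql_native_password response, reply = SHA1(password) XOR SHA1(scramble || SHA1(SHA1(password))). This is my own illustration using OpenSSL's SHA1 (link with -lcrypto); treat the exact formula as something to verify against the protocol docs:

#include <openssl/sha.h>
#include <string.h>

void native_password_reply(const char *password,
                           const unsigned char scramble[20],
                           unsigned char reply[SHA_DIGEST_LENGTH]) {
    unsigned char stage1[SHA_DIGEST_LENGTH];        /* SHA1(password) */
    unsigned char stage2[SHA_DIGEST_LENGTH];        /* SHA1(SHA1(password)) */
    unsigned char buf[20 + SHA_DIGEST_LENGTH];
    unsigned char mix[SHA_DIGEST_LENGTH];           /* SHA1(scramble || stage2) */

    SHA1((const unsigned char *)password, strlen(password), stage1);
    SHA1(stage1, sizeof(stage1), stage2);

    memcpy(buf, scramble, 20);
    memcpy(buf + 20, stage2, sizeof(stage2));
    SHA1(buf, sizeof(buf), mix);

    for (int i = 0; i < SHA_DIGEST_LENGTH; i++)     /* XOR the two digests */
        reply[i] = stage1[i] ^ mix[i];
}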

Fetching the binlog takes two steps (a sketch of the dump request layout follows the list):

1. COM_REGISTER_SLAVE registers the slave with the master.
2. COM_BINLOG_DUMP tells the master which binlog file name and position are wanted; the master then streams binlog events back to the client.
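A rough sketch of building the COM_BINLOG_DUMP payload, following the layout described in the MySQL internals documentation (1-byte command 0x12, 4-byte binlog position, 2-byte flags, 4-byte server id, then the binlog file name); this is my own illustration, so double-check the constants against the docs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t build_binlog_dump(uint8_t *out, uint32_t pos, uint16_t flags,
                                uint32_t server_id, const char *binlog_name) {
    size_t n = 0;
    out[n++] = 0x12;                                                     /* COM_BINLOG_DUMP */
    for (int i = 0; i < 4; i++) out[n++] = (pos >> (8 * i)) & 0xff;      /* little-endian position */
    for (int i = 0; i < 2; i++) out[n++] = (flags >> (8 * i)) & 0xff;
    for (int i = 0; i < 4; i++) out[n++] = (server_id >> (8 * i)) & 0xff;
    memcpy(out + n, binlog_name, strlen(binlog_name));                   /* file name, no terminator */
    return n + strlen(binlog_name);
}

int main(void) {
    uint8_t buf[128];
    size_t len = build_binlog_dump(buf, 4, 0, 1001, "mysql-bin.000006");
    printf("request is %zu bytes\n", len);
    return 0;
}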

How MySQL sends the binlog:

int Binlog_sender::get_binlog_end_pos(File_reader *reader, my_off_t *end_pos) {
  DBUG_TRACE;
  my_off_t read_pos = reader->position();

  do {
    /*
      MYSQL_BIN_LOG::binlog_end_pos is atomic. We should only acquire the
      LOCK_binlog_end_pos if we reached the end of the hot log and are going
      to wait for updates on the binary log (Binlog_sender::wait_new_event()).
    */
    *end_pos = mysql_bin_log.get_binlog_end_pos();

    /* If this is a cold binlog file, we are done getting the end pos */
    if (unlikely(!mysql_bin_log.is_active(m_linfo.log_file_name))) {
      *end_pos = 0;
      return 0;
    }

    DBUG_PRINT("info", ("Reading file %s, seek pos %llu, end_pos is %llu",
                        m_linfo.log_file_name, read_pos, *end_pos));
    DBUG_PRINT("info", ("Active file is %s", mysql_bin_log.get_log_fname()));

    if (read_pos < *end_pos) return 0;

    /* Some data may be in net buffer, it should be flushed before waiting */
    if (!m_wait_new_events || flush_net()) return 1;

    if (unlikely(wait_new_events(read_pos))) return 1;
  } while (unlikely(!m_thd->killed));

  return 1;
}
__attribute__((packed))

You will see this GCC extension attribute in Redis's sds.h. What is it for? It packs the type, removing alignment padding so the fields occupy the minimum space.

This attribute, attached to an enum, struct, or union type definition, specifies that the minimum required memory be used to represent the type.

Specifying this attribute for struct and union types is equivalent to specifying the packed attribute on each of the structure or union members. Specifying the -fshort-enums flag on the command line is equivalent to specifying the packed attribute on all enum definitions.

You may only specify this attribute after a closing curly brace on an enum definition, not in a typedef declaration, unless that declaration also contains the definition of the enum.
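A small illustration of the effect, my own example rather than code from sds.h: the same two fields with and without the attribute, where the packed variant drops the padding that alignment would otherwise insert:

#include <stdio.h>

struct padded {
    char flags;
    int  len;        /* 3 bytes of padding are normally inserted before this field */
};

struct __attribute__((packed)) packed_buf {
    char flags;
    int  len;        /* no padding: the struct is 5 bytes */
};

int main(void) {
    printf("padded: %zu bytes, packed: %zu bytes\n",
           sizeof(struct padded), sizeof(struct packed_buf));   /* typically 8 vs 5 */
    return 0;
}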

    DBUG_PRINT("info", ("Creating new slave thread"));
    if (mysql_thread_create(thread_key, &th, &connection_attrib, h_func,
    (void *)mi)) {
    LogErr(ERROR_LEVEL, ER_RPL_CANT_CREATE_SLAVE_THREAD,
    mi->get_for_channel_str());
    my_error(ER_SLAVE_THREAD, MYF(0));
    goto err;
    }
The slave thread:
/**
  Slave SQL thread entry point.

  @param arg Pointer to Relay_log_info object that holds information
  for the SQL thread.

  @return Always 0.
*/
extern "C" void *handle_slave_sql(void *arg) {
  THD *thd; /* needs to be first for thread_stack */
  bool thd_added = false;
  bool main_loop_error = false;
  char llbuff[22], llbuff1[22];

As a special case, the last element of a structure with more than one named member may have an incomplete array type; this is called a flexible array member. With two exceptions, the flexible array member is ignored. First, the size of the structure shall be equal to the offset of the last element of an otherwise identical structure that replaces the flexible array member with an array of unspecified length. Second, when a . (or ->) operator has a left operand that is (a pointer to) a structure with a flexible array member and the right operand names that member, it behaves as if that member were replaced with the longest array (with the same element type) that would not make the structure larger than the object being accessed; the offset of the array shall remain that of the flexible array member, even if this would differ from that of the replacement array. If this array would have no elements, it behaves as if it had one element but the behavior is undefined if any attempt is made to access that element or to generate a pointer one past it.
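A short illustration of a flexible array member, in the spirit of the header-plus-bytes layout Redis's sds uses; this is my own example, not code from the standard or from Redis:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    size_t len;
    char   data[];     /* flexible array member: adds no size of its own */
};

static struct buf *buf_new(const char *s) {
    size_t n = strlen(s);
    struct buf *b = malloc(sizeof(struct buf) + n + 1);   /* header + payload in one allocation */
    if (!b) return NULL;
    b->len = n;
    memcpy(b->data, s, n + 1);
    return b;
}

int main(void) {
    struct buf *b = buf_new("hello");
    printf("sizeof(struct buf) = %zu, len = %zu, data = %s\n",
           sizeof(struct buf), b->len, b->data);
    free(b);
    return 0;
}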


Suddenly I don't want to write this business code any more — just like that. I've been working for almost four years, with no progress, and I feel like I can't do anything.

What have I actually done in these years?

I fixed two memory leaks in PHP; compared with writing business code, I much prefer fixing bugs.

I introduced canal and got to understand the binlog better, and it really works well — though canal has plenty of pitfalls.

Those parts were good, but the rest has been wasted time for me. I really don't like writing front-end code — why does PHP work always have to include the front end?

I can't solve algorithm problems at all, truly not at all; only recently did I understand loop invariants, Hoare logic, and a little mathematical logic.

As for databases, I only recently sorted out the relationship between isolation levels, transactions, and MVCC — three and a half years is too long. It also took me a year to figure out when PHP actually closes its connections.

As for Redis, I don't really understand it; I only know how to do a simple `set aaa bbb`.

As for Elasticsearch, I know something about tokenization because I worked on NLP-related requirements, but I still have never used ES.

So for me: I can use PHP yet still write bugs every day, there is a lot of MySQL I can't use, and on the networking side I have basically never used long-lived connections. A rather unsuccessful three and a half years.

These three and a half years have been quite a failure for me.

The happy part

The happiest thing is that I now go and find relevant papers to read, even though it's of no use for my job.

Many things are no longer black boxes to me, even though that's of no use for interviews.

The only thing I know

Only mathematics is the one and only tool that can provide correctness; practice is not what really delivers it.


The painful points:

1. The sync position is persisted to a file or to ZooKeeper every second, while binlogs are usually only kept for a few days. If the binlog file recorded in your file/zk metadata no longer exists on MySQL, canal reports an error, roughly like this:

java.io.IOException: Received error packet: errno = 1236, sqlstate = HY000 errmsg = Could not find first log file name in binary log index file

At that point the only options are to adjust the configuration or delete the meta.dat file and restart canal; it will then run MySQL's `show master status` to fetch the latest position.

2. Another very painful point when restarting canal is that it reads the information_schema database to fetch table names, table ids and related information, and that can take a very long time — I'm not sure whether it's just the test environment, but it took quite a while.

How the meta is flushed

According to the configuration, the meta information is flushed every second to the file or to ZooKeeper, so after a restart there can be duplicate consumption.

Finding the position

Loading order:
1. Get the position from the meta: getLatestIndexBy, i.e. read the position from the memory/zk/file meta store.
2. Otherwise fall back to the configuration:

    protected EntryPosition findStartPositionInternal(ErosaConnection connection) {
    MysqlConnection mysqlConnection = (MysqlConnection) connection;
    LogPosition logPosition = logPositionManager.getLatestIndexBy(destination);
    if (logPosition == null) {// 找不到历史成功记录
    EntryPosition entryPosition = null;
    if (masterInfo != null && mysqlConnection.getConnector().getAddress().equals(masterInfo.getAddress())) {
    entryPosition = masterPosition;
    } else if (standbyInfo != null
    && mysqlConnection.getConnector().getAddress().equals(standbyInfo.getAddress())) {
    entryPosition = standbyPosition;
    }

    if (entryPosition == null) {
    entryPosition = findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
    }

    // 判断一下是否需要按时间订阅
    if (StringUtils.isEmpty(entryPosition.getJournalName())) {
    // 如果没有指定binlogName,尝试按照timestamp进行查找
    if (entryPosition.getTimestamp() != null && entryPosition.getTimestamp() > 0L) {
    logger.warn("prepare to find start position {}:{}:{}",
    new Object[] { "", "", entryPosition.getTimestamp() });
    return findByStartTimeStamp(mysqlConnection, entryPosition.getTimestamp());
    } else {
    logger.warn("prepare to find start position just show master status");
    return findEndPositionWithMasterIdAndTimestamp(mysqlConnection); // 默认从当前最后一个位置进行消费
    }
    } else {
    if (entryPosition.getPosition() != null && entryPosition.getPosition() > 0L) {
    // 如果指定binlogName + offest,直接返回
    entryPosition = findPositionWithMasterIdAndTimestamp(mysqlConnection, entryPosition);
    logger.warn("prepare to find start position {}:{}:{}",
    new Object[] { entryPosition.getJournalName(), entryPosition.getPosition(),
    entryPosition.getTimestamp() });
    return entryPosition;
    } else {
    EntryPosition specificLogFilePosition = null;
    if (entryPosition.getTimestamp() != null && entryPosition.getTimestamp() > 0L) {
    // 如果指定binlogName +
    // timestamp,但没有指定对应的offest,尝试根据时间找一下offest
    EntryPosition endPosition = findEndPosition(mysqlConnection);
    if (endPosition != null) {
    logger.warn("prepare to find start position {}:{}:{}",
    new Object[] { entryPosition.getJournalName(), "", entryPosition.getTimestamp() });
    specificLogFilePosition = findAsPerTimestampInSpecificLogFile(mysqlConnection,
    entryPosition.getTimestamp(),
    endPosition,
    entryPosition.getJournalName(),
    true);
    }
    }

    if (specificLogFilePosition == null) {
    // position不存在,从文件头开始
    entryPosition.setPosition(BINLOG_START_OFFEST);
    return entryPosition;
    } else {
    return specificLogFilePosition;
    }
    }
    }
    } else {
    if (logPosition.getIdentity().getSourceAddress().equals(mysqlConnection.getConnector().getAddress())) {
    if (dumpErrorCountThreshold >= 0 && dumpErrorCount > dumpErrorCountThreshold) {
    // binlog定位位点失败,可能有两个原因:
    // 1. binlog位点被删除
    // 2.vip模式的mysql,发生了主备切换,判断一下serverId是否变化,针对这种模式可以发起一次基于时间戳查找合适的binlog位点
    boolean case2 = (standbyInfo == null || standbyInfo.getAddress() == null)
    && logPosition.getPostion().getServerId() != null
    && !logPosition.getPostion().getServerId().equals(findServerId(mysqlConnection));
    if (case2) {
    EntryPosition findPosition = fallbackFindByStartTimestamp(logPosition, mysqlConnection);
    dumpErrorCount = 0;
    return findPosition;
    }
    // 处理 binlog 位点被删除的情况,提供自动重置到当前位点的功能
    // 应用场景: 测试环境不稳定,位点经常被删。强烈不建议在正式环境中开启此控制参数,因为binlog 丢失调到最新位点也即意味着数据丢失
    if (isAutoResetLatestPosMode()) {
    dumpErrorCount = 0;
    return findEndPosition(mysqlConnection);
    }
    Long timestamp = logPosition.getPostion().getTimestamp();
    if (isRdsOssMode() && (timestamp != null && timestamp > 0)) {
    // 如果binlog位点不存在,并且属于timestamp不为空,可以返回null走到oss binlog处理
    return null;
    }
    } else if (StringUtils.isBlank(logPosition.getPostion().getJournalName())
    && logPosition.getPostion().getPosition() <= 0
    && logPosition.getPostion().getTimestamp() > 0) {
    return fallbackFindByStartTimestamp(logPosition,mysqlConnection);
    }
    // 其余情况
    logger.warn("prepare to find start position just last position\n {}",
    JsonUtils.marshalToString(logPosition));
    return logPosition.getPostion();
    } else {
    // 针对切换的情况,考虑回退时间
    long newStartTimestamp = logPosition.getPostion().getTimestamp() - fallbackIntervalInSeconds * 1000;
    logger.warn("prepare to find start position by switch {}:{}:{}", new Object[] { "", "",
    logPosition.getPostion().getTimestamp() });
    return findByStartTimeStamp(mysqlConnection, newStartTimestamp);
    }
    }
Event types

There are many event types; for now I'm only interested in UPDATE and INSERT.

    public enum EventType
    implements com.google.protobuf.ProtocolMessageEnum {
    /**
    * <code>INSERT = 1;</code>
    */
    INSERT(0, 1),
    /**
    * <code>UPDATE = 2;</code>
    */
    UPDATE(1, 2),
    /**
    * <code>DELETE = 3;</code>
    */
    DELETE(2, 3),
    /**
    * <code>CREATE = 4;</code>
    */
    CREATE(3, 4),
    /**
    * <code>ALTER = 5;</code>
    */
    ALTER(4, 5),
    /**
    * <code>ERASE = 6;</code>
    */
    ERASE(5, 6),
    /**
    * <code>QUERY = 7;</code>
    */
    QUERY(6, 7),
    /**
    * <code>TRUNCATE = 8;</code>
    */
    TRUNCATE(7, 8),
    /**
    * <code>RENAME = 9;</code>
    */
    RENAME(8, 9),
    /**
    * <code>CINDEX = 10;</code>
    *
    * <pre>
    **CREATE INDEX*
    * </pre>
    */
    CINDEX(9, 10),
    /**
    * <code>DINDEX = 11;</code>
    */
    DINDEX(10, 11),
    /**
    * <code>GTID = 12;</code>
    */
    GTID(11, 12),
    /**
    * <code>XACOMMIT = 13;</code>
    *
    * <pre>
    ** XA *
    * </pre>
    */
    XACOMMIT(12, 13),
    /**
    * <code>XAROLLBACK = 14;</code>
    */
    XAROLLBACK(13, 14),
    /**
    * <code>MHEARTBEAT = 15;</code>
    *
    * <pre>
    ** MASTER HEARTBEAT *
    * </pre>
    */
    MHEARTBEAT(14, 15),
    ;
Related reading

    http://www.tianshouzhi.com/api/tutorials/canal


The state machine

                                 +---------+ ---------\      active OPEN
    | CLOSED | \ -----------
    +---------+<---------\ \ create TCB
    | ^ \ \ snd SYN
    passive OPEN | | CLOSE \ \
    ------------ | | ---------- \ \
    create TCB | | delete TCB \ \
    V | \ \
    +---------+ CLOSE | \
    | LISTEN | ---------- | |
    +---------+ delete TCB | |
    rcv SYN | | SEND | |
    ----------- | | ------- | V
    +---------+ snd SYN,ACK / \ snd SYN +---------+
    | |<----------------- ------------------>| |
    | SYN | rcv SYN | SYN |
    | RCVD |<-----------------------------------------------| SENT |
    | | snd ACK | |
    | |------------------ -------------------| |
    +---------+ rcv ACK of SYN \ / rcv SYN,ACK +---------+
    | -------------- | | -----------
    | x | | snd ACK
    | V V
    | CLOSE +---------+
    | ------- | ESTAB |
    | snd FIN +---------+
    | CLOSE | | rcv FIN
    V ------- | | -------
    +---------+ snd FIN / \ snd ACK +---------+
    | FIN |<----------------- ------------------>| CLOSE |
    | WAIT-1 |------------------ | WAIT |
    +---------+ rcv FIN \ +---------+
    | rcv ACK of FIN ------- | CLOSE |
    | -------------- snd ACK | ------- |
    V x V snd FIN V
    +---------+ +---------+ +---------+
    |FINWAIT-2| | CLOSING | | LAST-ACK|
    +---------+ +---------+ +---------+
    | rcv ACK of FIN | rcv ACK of FIN |
    | rcv FIN -------------- | Timeout=2MSL -------------- |
    | ------- x V ------------ x V
    \ snd ACK +---------+delete TCB +---------+
    ------------------------>|TIME WAIT|------------------>| CLOSED |
    +---------+ +---------+
Congestion control

1. Slow start and congestion avoidance
2. Fast retransmit

(A toy simulation of the window growth in these two phases follows.)
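A toy simulation, my own illustration rather than anything from the post: the congestion window grows exponentially during slow start until it reaches ssthresh, then linearly (one MSS per RTT) during congestion avoidance:

#include <stdio.h>

int main(void) {
    int cwnd = 1;          /* congestion window, in MSS units */
    int ssthresh = 16;     /* slow start threshold */

    for (int rtt = 0; rtt < 10; rtt++) {
        printf("rtt %d: cwnd = %d (%s)\n", rtt, cwnd,
               cwnd < ssthresh ? "slow start" : "congestion avoidance");
        if (cwnd < ssthresh)
            cwnd *= 2;     /* slow start: double every round trip */
        else
            cwnd += 1;     /* congestion avoidance: additive increase */
    }
    return 0;
}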

Slow start

What is slow start for? It lets a new connection probe the available capacity: the window starts small and grows quickly instead of flooding the network from the first round trip.

Fast retransmit

Fast recovery

My understanding

What is TCP in essence?
In essence it is a byte stream.
Why do endianness problems exist?
Because a byte stream is just bytes; if you only ever transfer one byte at a time there is no problem between machines of different endianness — the issue appears when multi-byte values are interpreted on top of the stream.
How is packet loss handled?
Sequence numbers plus retransmission. Why do sequence numbers work?
Because sequence numbers map one-to-one onto the data, a segment and its sequence number identify each other.
Is retransmission a problem?
Because sequence numbers and data correspond one-to-one, retransmission is idempotent, so retransmitting causes no harm (a tiny sketch below illustrates this).
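A tiny illustration, my own example: the receiver tracks the next expected byte offset, so a retransmitted segment is recognized by its sequence number and dropped, which is exactly why retransmission is idempotent:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64

static char stream[BUF_SIZE];
static unsigned next_expected = 0;     /* next byte offset the receiver wants */

static void deliver(unsigned seq, const char *data, unsigned len) {
    if (seq + len <= next_expected) {
        printf("seq %u: duplicate, dropped\n", seq);          /* retransmission is harmless */
        return;
    }
    if (seq > next_expected) {
        printf("seq %u: out of order, still waiting for %u\n", seq, next_expected);
        return;
    }
    unsigned skip = next_expected - seq;                      /* partially duplicated segment */
    memcpy(stream + next_expected, data + skip, len - skip);
    next_expected = seq + len;
    printf("seq %u: accepted, next expected %u\n", seq, next_expected);
}

int main(void) {
    deliver(0, "hell", 4);
    deliver(0, "hell", 4);     /* retransmitted copy: dropped */
    deliver(4, "o", 1);
    printf("stream: %.*s\n", (int)next_expected, stream);
    return 0;
}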

Why do we need the state machine?
Because a state machine moves from one well-defined state to another, which makes the whole flow explicit.


Pipes


Some code using PHP's Workerman threw this exception:

fwrite(): send of 157 bytes failed with errno=11 Resource temporarily unavailable

The socket was set to non-blocking, yet this error still showed up. It turns out the behaviour has already been fixed upstream so that no notice is raised for this case, but my PHP 5.6 build still has the old code, so the notice is still raised. A whole day of investigation, and it isn't a bug.
The latest change:

https://github.com/php/php-src/pull/5026/files
	didwrite = send(sock->socket, buf, XP_SOCK_BUF_SIZE(count), (sock->is_blocked && ptimeout) ? MSG_DONTWAIT : 0);

	if (didwrite <= 0) {
		char *estr;
		int err = php_socket_errno();
		if (err == EWOULDBLOCK || err == EAGAIN) {
			if (sock->is_blocked) {
				int retval;

				sock->timeout_event = 0;

				do {
					retval = php_pollfd_for(sock->socket, POLLOUT, ptimeout);

					if (retval == 0) {
						sock->timeout_event = 1;
						break;
					}

					if (retval > 0) {
						/* writable now; retry */
						goto retry;
					}

					err = php_socket_errno();
				} while (err == EINTR);
			} else {
				/* EWOULDBLOCK/EAGAIN is not an error for a non-blocking stream.
				 * Report zero byte write instead. */
				return 0;
			}
		}

		estr = php_socket_strerror(err, NULL, 0);
		php_error_docref(NULL, E_NOTICE, "Send of " ZEND_LONG_FMT " bytes failed with errno=%d %s",
			(zend_long)count, err, estr);
		efree(estr);
	}

	if (didwrite > 0) {
		php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), didwrite, 0);
	}

	return didwrite;
}

Background

Papers are fun; I really hope to read many, many of them.

Sometimes I feel the mathematics in papers is genuinely beautiful.


TCP fundamentally relies on sequence numbers for reliability, under the convention that the sequence numbers increase monotonically.


How do you read MySQL's binlog?

You can use the mysqlbinlog tool that ships with MySQL.

The important part is to add -v. The command:

mysqlbinlog -v mysql-bin.000006
You will then see the actual SQL; in my case it is:

    #210311 14:51:10 server id 1  end_log_pos 1606 CRC32 0x783f56ab 	Query	thread_id=64	exec_time=0	error_code=0
    SET TIMESTAMP=1615445470/*!*/;
    BEGIN
    /*!*/;
    # at 1606
    #210311 14:51:10 server id 1 end_log_pos 1672 CRC32 0x52632df5 Table_map: `user_db`.`tb_user` mapped to number 119
    # at 1672
    #210311 14:51:10 server id 1 end_log_pos 1783 CRC32 0xf565f574 Update_rows: table id 119 flags: STMT_END_F

    BINLOG '
    3r1JYBMBAAAAQgAAAIgGAAAAAHcAAAAAAAEAB3VzZXJfZGIAB3RiX3VzZXIABgMPDw8BDwgUAEAA
    FAAyADj1LWNS
    3r1JYB8BAAAAbwAAAPcGAAAAAHcAAAAAAAEAAgAG///AYAAAAAI5NgYxMjM0NTYFOTk5OTcSD3po
    YW5nc2FuQGJ1Zy5jbsBgAAAAATUGMTIzNDU2BTk5OTk3Eg96aGFuZ3NhbkBidWcuY2509WX1
    '/*!*/;
    ### UPDATE `user_db`.`tb_user`
    ### WHERE
    ### @1=96
    ### @2='96'
    ### @3='123456'
    ### @4='99997'
    ### @5=18
    ### @6='zhangsan@bug.cn'
    ### SET
    ### @1=96
    ### @2='5'
    ### @3='123456'
    ### @4='99997'
    ### @5=18
    ### @6='zhangsan@bug.cn'
    # at 1783
    #210311 14:51:10 server id 1 end_log_pos 1814 CRC32 0xa90279ec Xid = 1794
    COMMIT/*!*/;
    SET @@SESSION.GTID_NEXT= 'AUTOMATIC' /* added by mysqlbinlog */ /*!*/;
    DELIMITER ;
    # End of log file
    /*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
    /*!50530 SET @@SESSION.PSEUDO_SLAVE_MODE=0*/;


Protocol flow

COM_BINLOG_DUMP -> com_binlog_dump

Further reading

    https://www.cnblogs.com/netsa/p/7350629.html
    https://dev.mysql.com/doc/internals/en/replication-protocol.html


The docker-compose spec

Top level:
  version
  services
  networks
  volumes
  configs
  secrets

services:
  build
  deploy


    Each service MAY also include a Build section, which defines how to create the Docker image for the service. Compose implementations MAY support building docker images using this service definition. If not implemented the Build section SHOULD be ignored and the Compose file MUST still be considered valid.


    Build support is an OPTIONAL aspect of the Compose specification, and is described in detail here


    Each Service defines runtime constraints and requirements to run its containers. The deploy section groups these constraints and allows the platform to adjust the deployment strategy to best match containers’ needs with available resources.

Related documentation


How do we write correct code?

The answer is formal verification; one approach is Hoare logic.

An example:

Fast exponentiation
Precondition: x >= 0
x/2/2/... must eventually reach 1 or 0, so the recursion runs a finite number of times.

Fast exponentiation
a^x = (a^floor(x/2))^2 * a^(x%2)
fun(a, x) = {
    if (x == 0) {
        return 1;
    } else if (x == 1) {
        return a;
    }
    return fun(a, floor(x/2))^2 * a^(x%2);
}
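A runnable version of the sketch above, with the precondition and postcondition written as plain assertions (a minimal illustration of the Hoare-logic idea, not a formal proof):

def fast_pow(a: int, x: int) -> int:
    """Compute a**x by repeated squaring. Precondition: x >= 0."""
    assert x >= 0                      # {x >= 0}
    if x == 0:
        return 1
    if x == 1:
        return a
    half = fast_pow(a, x // 2)         # x//2 < x, so the recursion terminates
    result = half * half * (a if x % 2 else 1)
    assert result == a ** x            # postcondition: {result = a^x}
    return result

assert fast_pow(3, 10) == 3 ** 10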

5.1 INTRODUCTION
In a multiversion concurrency control algorithm, every write on a data item x produces a new copy (a version) of x.
The data manager (DM) therefore keeps a list of all the versions of x it has ever assigned.
For each Read(x), the scheduler not only decides when to send the read to the DM, it also tells the DM which version of x to read.
    In a multiversion concurrency control algorithm, each Write on a data item x
    produces a new copy (or version) of X. The DM that manages x therefore
    keeps a list of versions of X, which is the history of values that the DM has
    assigned to X. For each Read(x), the scheduler not only decides when to send
    the Read to the DM, but it also tells the DM which one of the versions of x to
    read.
The benefit of multiversioning is that it helps the scheduler avoid rejecting operations that arrive too late. For example, in the single-version case the scheduler would reject a read whose intended value has already been overwritten.
With multiple versions the old values are never overwritten, so a tardy read can still be served; the scheduler avoids the rejection simply by letting the read use an old version.
Keeping multiple versions does not add much to the cost of concurrency control, because the recovery algorithm needs that version information anyway.
    The benefit of multiple versions for concurrency control is to help the
    scheduler avoid rejecting operations that arrive too late. For example, the
    scheduler normally rejects a Read because the value it was supposed to read
    has already been overwritten. With multiversions, such old values are never
    overwritten and are therefore always available to tardy Reads. The scheduler
    can avoid rejecting the Read simply by having the Read read an old version.’
    Maintaining multiple versions may not add much to the cost of concurrency
    control, because the versions may be needed anyway by the recovery
    algorithm.
    As we’ll see in the next chapter, many recovery algorithms have to
    maintain some before image information, at least of those data items that have
    been updated by active transactions; the recovery algorithm needs those before
    images in case any of the active transactions abort. The before images of a data
    item are exactly its list of old versions. It is a small step for the DM to make
    those versions explicitly available to the scheduler.
An obvious cost is that storing multiple versions takes space; to keep that storage under control, versions have to be purged or archived periodically.
Because certain versions may still be needed by active transactions (transactions that have neither committed nor aborted), purging has to be synchronized with the active transactions.
This purging activity is the other cost of MVCC.
    An obvious cost of maintaining multiple versions is storage space. To
    control this storage requirement, versions must periodically be purged or archived. Since certain versions may be needed by active transactions, purging
    versions must be synchronized with respect to active transactions. This purging
    activity is another cost of multiversion concurrency control.


We assume that when a transaction aborts, the versions it created are destroyed. In the discussion that follows, the term "version" refers to the value of a data item produced by a committed or active transaction. So when the scheduler decides to assign a particular version of x to Read(x), the value returned never comes from an aborted transaction.
If the version being read was produced by a still-active transaction, recoverability requires that the reader's commit be delayed until the transaction that produced the version has committed.


    We assume that if a transaction is aborted, any versions it created are
    destroyed. In our subsequent discussion, the term “version” will refer to the
    value of a data item produced by a transaction that’s either active or committed.
    Thus, when the scheduler decides to assign a particular version of x to
    Read(x), the value returned is not one produced by an aborted transaction. If
    the version read is one produced by an active transaction, recoverability
    requires that the reader’s commitment be delayed until the transaction that
    produced the version has committed.
If the transaction that was read from ends up aborting (thereby invalidating its version), the reading transaction must be aborted as well.
    If that transaction actually aborts
    (thereby invalidating its version), the reader must also be aborted.
The existence of multiple versions is visible only to the scheduler and the DM, not to user transactions.
    The existence of multiple versions is only visible to the scheduler and DM,
    not to user transactions.
Transactions still reference plain data items such as x and y; apart from the database system itself, users see only a single version of each item, namely the last one written from their point of view.
    Transactions still reference data items, such as x and
    y. Users therefore expect the DBS to behave as if there were only one version of
    each data item, namely, the last one that was written from that user’s perspective.
The scheduler may use multiple versions to improve performance by rejecting operations less often.
    The scheduler may use multiple versions to improve performance by
    rejecting operations less frequently.
But the end result must look exactly the same as a single-version execution.
    But it must not change the system’s
    functionality over a single version view of the database.
    There are many applications of databases in which users do want to
    explicitly access each of the multiple versions of a data item. For example, a
    user may wish to maintain several versions of a design database: the last design
    released for manufacturing, the last design checked for correctness, and the
    most recent working design. The user may update each version of the design
    independently. Since the existence of these multiple versions is not transparent
    to the user, such applications are not appropriate for the multiversion concurrency
    control algorithms described in this chapter.

Analyzing Correctness
Analyzing the correctness of MVCC
To analyze the correctness of MVCC algorithms we need to extend serializability theory. We need two kinds of histories: multiversion (MV) histories, which describe execution against the multiversion database, and single-version (1V) histories, which are what the user sees. The user regards serial 1V histories as correct (we treat transactions as if there were only one version; the whole goal is that the multiversion execution looks the same as the single-version view, much as a compiler's SSA-based optimizations reorder many operations while the observable behaviour stays the same).
    To analyze the correctness of multiversion concurrency control algorithms, we
    need to extend serializability theory. This extension requires two types of
    histories: multiversion (MV) histories that represent the DM’s execution of
operations on a multiversion database, and single version (1V) histories that
    represent the interpretation of MV histories in the users’ single version view of
    the database. Serial 1V histories are the histories that the user regards as
    correct.
But the system actually produces MV histories (they only have to look like single-version ones). So to prove a concurrency control algorithm correct, we must prove that every MV history it can produce is equivalent to a serial 1V history. What does it mean for an MV history to be equivalent to a 1V history (what is the semantics of that equivalence)?
    But the system actually produces MV histories. So, to prove that a
    concurrency control aIgorithm is correct, we must prove that each of the MV
    histories that it can produce is equivalent to a serial 1V history,
    What does it mean for an MV history to be equivalent to a 1V history?
We try to answer this by extending the notion of equivalence between 1V histories. To do that we need a little notation.
    Let’s try to answer this by extending the definition of equivalence of 1V histories
    that we used in Chapters 2-4. To attempt this extension, we need a little
    notation.
For each data item x we write its versions as xi, xj, ..., where the subscript is the index of the transaction that wrote that version (xi means transaction Ti wrote a version of x).
So in an MV history every write has the form wi[xi]: the version subscript equals the transaction subscript. Reads are written in the usual way, e.g. ri[xj].
    For each data item X, we denote the versions of x by xi, xj, . . . , where
    the subscript is the index of the transaction that wrote the version. Thus, each
    Write in an MV history is always of the form Wi[Xi], where the version
    subscript equals the transaction subscript. Reads are denoted in the usual way,
    such as ri[xj].
Suppose we define equivalence between an MV history and a 1V history as: every pair of conflicting operations appears in the same order in both.
Consider the MV history

    H1 = w0[x0]c0w1[x1]c1r2[x0]w2[y2]c2
In H1 the only conflicting pair is w0[x0] and r2[x0]; the write w1[x1] conflicts with neither w0[x0] nor r2[x0], because it operates on a different version, x1. Now consider the 1V history:

    H2 = w0[x]c0w1[x]c1r2[x]w2[y]c2 

We construct H2 by dropping the version subscripts: x1 and x0 both map to x, y2 maps to y. In H2 the operations w0[x] and r2[x] conflict, and they appear in the same order as the corresponding operations in H1, so by the definition above the two histories would be equivalent. But that is not reasonable: in H1, T2 reads x from T0, whereas in H2, T2 reads x from T1. Since the reads come from different sources, T2 may end up writing a different value into y.
Suppose we adopt a definition of equivalence that says an MV history Hmv is equivalent to a 1V history H1v if every pair of conflicting operations in Hmv is in the same order in H1v. Consider the MV history
H1 = w0[x0] c0 w1[x1] c1 r2[x0] w2[y2] c2.
The only two operations in H1 that conflict are w0[x0] and r2[x0]. The operation w1[x1] does not conflict with either w0[x0] or r2[x0], because it operates on a different version. Interpreting H1 over the data items themselves gives
H2 = w0[x] c0 w1[x] c1 r2[x] w2[y] c2.
We constructed H2 by mapping each operation on versions x0, x1, and y2 in H1 into the same operation on the corresponding data items x and y. Notice that the two operations in H1 that conflict, w0[x0] and r2[x0], are in the same order in H2 as in H1. So, according to the definition of equivalence just given, H1 is equivalent to H2. But this is not reasonable. In H2, T2 reads x from T1, whereas in H1, T2 reads x from T0. Since T2 reads a different value of x in H1 and H2, it may write a different value in y.
Our trouble with defining equivalence via conflicts comes from the fact that MV and 1V histories operate on different things: one on versions, the other on data items (compare "hitting the 11-year-old you" versus "hitting you": different semantics). They therefore have different conflict properties.
For example, in the MV history w1[x1] and r2[x0] do not conflict, but their corresponding 1V operations (obtained by dropping the version subscripts), w1[x] and r2[x], do conflict.
So a definition of equivalence based purely on conflicts is not precise enough.
This definition of equivalence based on conflicts runs into trouble because MV and 1V histories have slightly different operations - version operations versus data item operations. These operations have different conflict properties. For example, w1[x1] does not conflict with r2[x0], but their corresponding 1V operations w1[x] and r2[x] do conflict. Therefore, a definition of equivalence based on conflicts is inappropriate.
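To make the H1/H2 example concrete, here is a small sketch (my own illustration, not from the book) that computes the reads-from relation of both histories: the conflicting pairs are in the same order, yet the reads-from relations differ, which is exactly why conflict-based equivalence is too weak.

# Operations: ('w', txn, item, version) or ('r', txn, item, version); version is None in a 1V history.
H1 = [('w', 0, 'x', 0), ('w', 1, 'x', 1), ('r', 2, 'x', 0), ('w', 2, 'y', 2)]                 # MV history
H2 = [('w', 0, 'x', None), ('w', 1, 'x', None), ('r', 2, 'x', None), ('w', 2, 'y', None)]     # 1V history

def reads_from(history):
    """Return {(reader, item): writer}. In an MV history the version says who wrote it;
    in a 1V history the reader reads from the most recent preceding writer of the item."""
    rf = {}
    last_writer = {}
    for kind, txn, item, version in history:
        if kind == 'w':
            last_writer[item] = txn
        else:
            rf[(txn, item)] = version if version is not None else last_writer[item]
    return rf

print(reads_from(H1))  # {(2, 'x'): 0} -> T2 reads x from T0
print(reads_from(H2))  # {(2, 'x'): 1} -> T2 reads x from T1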
To solve this problem (the reads coming from different sources), we go back to the view equivalence of Section 2.6.
Recall that two histories are view equivalent if they have the same reads-from relationships and the same final writes.
Comparing H1 and H2: in H1 (the MV history) T2 reads x from T0, while in H2 (the 1V history) T2 reads x from T1, so H1 and H2 are not view equivalent.
    To solve this problem, we need to return to first principles by adopting the
    more fundamental definition of view equivalence developed in Section 2.6.
    Recall that two histories are view equivalent if they have the same reads-from
relationships and the same final writes. Comparing histories H1 and H2, we see
that T2 reads x from T0 in H1, but T2 reads x from T1 in H2. Thus, H1 is not
view equivalent to H2.
Now that we have a satisfactory definition of equivalence (view equivalence between an MV history and a 1V history), we need a way to show that every MV history H produced by a given algorithm is equivalent to a serial 1V history.
One route would be to show that SG(H) is acyclic, so that H is equivalent to a serial MV history; but that alone does not help much, because not every serial MV history is equivalent to a serial 1V history.
For example:
H3 = w0[x0]w0[y0]c0R// todo
If we treat each version as a separate data item we can build a serialization graph, and even though that graph is acyclic, H3 is still not equivalent to the 1V history it maps to, because the reads-from relationships differ.

    Now that we have a satisfactory definition of equivalence, we need a way
    of showing that every MV history H produced by a given multiversion concurrency
    control algorithm is equivalent to a serial 1V history. One way would be
    to show that SG( H) is acyclic, so H is equivalent to a serial MV history, Unfortunately,
    this doesn’t help much, because not every serial MV history is equivalent
    to a serial 1V history. For example, consider the serial MV history
Only a subset of serial MV histories, the 1-serial MV histories, are equivalent to serial 1V histories: a serial MV history is 1-serial if for every reads-from relationship the reader reads either its own write or the most recent preceding transaction that wrote the item. Such serial MV histories are equivalent to serial 1V histories.
Only a subset of serial MV histories, called 1-serial MV histories, are equivalent to serial 1V histories. Intuitively, a serial MV history is 1-serial if for each reads-from relationship, say Tj reads x from Ti, Ti is the last transaction preceding Tj that writes any version of x. Notice that H3 is not 1-serial because T2 reads x from T0, not T1, which is the last transaction preceding T2 that writes x.

mv2pl implies 1SR. Theorem 5.3: Let H be an MV history over T. C(H) is equivalent to a
serial 1V history over T iff H is 1SR.

My summary of how MVCC works

Definition of 1SR

    An MV history is one-copy serializable (or 1SR) if its committed projection is equivalent to a l-serial MV history.

1-serial MV history:

Every transaction reads from the transaction that wrote the latest version among those preceding it.
1. For MV transactions Ti and Tj: if Ti reads x from Tj, then the constraint is that Tj's version of x is the latest among all transactions preceding Ti that wrote x.

- A small question: "one transaction precedes another" is a partial order, so how exactly is this "precedes" defined?

    MV History Equivalence:

Two MV histories are view equivalent if every read in both histories reads from the same source (and the final writes are the same).

    mv2pl imply mvsg
    mvsg imply 1sr
    1sr imply 1-serial

Further reading


Why I wanted to read the SELECT code

There was a case where a table had only a hundred-odd thousand rows but was tens of GB on disk. Why? Because one column is a longtext holding very long blobs of text. It turned out that even select * from table limit 1000 could no longer be read back.

A simple SELECT statement

select id from test where id < 100 ;
What actually happens in this flow?

Step 1 is much like PHP: the compiler-frontend stages, lex and parse.
Step 2 is logical and physical optimization; think of it as the constant folding or dataflow analysis of ordinary programming languages, i.e. compile-time optimization.
Step 3 is the semantic actions, the actual execution, which you can think of as the runtime:
the condition is not visible to the read itself; it acts on the index, all matching rows are returned, then the columns are projected, then joins and sorting happen. For this SQL the only thing done is reading rows through the index; memory versus disk is invisible at this level.

First comes reading the table, which lives in the .ibd file. So ultimately a system call has to read the file; on Linux that system call is pread.

Where is the index stored?
In the tablespace.
Where is the row content stored?
Also in the tablespace.
Where does the association happen?
When the index entries are read out of the tablespace.
Is the association a whole row at a time?
Yes.
How is a secondary index read?
The secondary index is used to reach the primary (clustered) index, and the clustered index is used to read the content (see the sketch below).
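A toy sketch of the last answer (purely illustrative data structures, nothing to do with the real InnoDB code): a secondary index maps a column value to the primary key, and the clustered index maps the primary key to the whole row.

# Clustered index: primary key -> full row (this is where the row data lives).
clustered = {
    1: {"id": 1, "name": "alice", "age": 30},
    2: {"id": 2, "name": "bob", "age": 25},
}
# Secondary index on name: column value -> primary key only.
idx_name = {"alice": 1, "bob": 2}

def select_by_name(name):
    pk = idx_name[name]      # 1) look up the secondary index, get the primary key
    return clustered[pk]     # 2) go back to the clustered index to fetch the row

print(select_by_name("bob"))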

So there is effectively a semantic layer:
sql -> semantic actions applied to the index -> the index accesses the tablespace (much like swap, or the relationship between physical and virtual memory: pages are loaded from disk only when needed).

Below are the relevant stacks.

    (gdb) bt
    #0 srv_start (create_new_db=create_new_db@entry=false) at /home/ubuntu/mysql-8.0.23/storage/innobase/srv/srv0start.cc:1857
    #1 0x00005555577275b6 in innobase_init_files (tablespaces=0x7fffea1f1380, dict_init_mode=DICT_INIT_CHECK_FILES) at /home/ubuntu/mysql-8.0.23/storage/innobase/handler/ha_innodb.cc:5042
    #2 innobase_ddse_dict_init (dict_init_mode=DICT_INIT_CHECK_FILES, version=<optimized out>, tables=0x7fffea1f1360, tablespaces=0x7fffea1f1380)
    at /home/ubuntu/mysql-8.0.23/storage/innobase/handler/ha_innodb.cc:12323
    #3 0x00005555573d2aef in dd::bootstrap::DDSE_dict_init (thd=thd@entry=0x55555b899410, dict_init_mode=dict_init_mode@entry=DICT_INIT_CHECK_FILES, version=80023)
    at /home/ubuntu/mysql-8.0.23/sql/dd/impl/bootstrap/bootstrapper.cc:737
    #4 0x00005555575f92e4 in dd::upgrade_57::do_pre_checks_and_initialize_dd (thd=0x55555b899410) at /home/ubuntu/mysql-8.0.23/sql/dd/upgrade_57/upgrade.cc:911
    #5 0x0000555556697ec5 in bootstrap::handle_bootstrap (arg=arg@entry=0x7fffffffda10) at /home/ubuntu/mysql-8.0.23/sql/bootstrap.cc:323
    #6 0x0000555557b934a1 in pfs_spawn_thread (arg=0x55555b834c80) at /home/ubuntu/mysql-8.0.23/storage/perfschema/pfs.cc:2900
    #7 0x00007ffff7bbb6db in start_thread (arg=0x7fffea1f2700) at pthread_create.c:463
    #8 0x00007ffff61b571f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
    (gdb) info threads 
    Id Target Id Frame
    1 Thread 0x7ffff7fe7880 (LWP 17988) "mysqld" 0x00007ffff7bbcd2d in __GI___pthread_timedjoin_ex (threadid=140737121298176, thread_return=thread_return@entry=0x0, abstime=abstime@entry=0x0,
    block=block@entry=true) at pthread_join_common.c:89
    * 2 Thread 0x7fffea1f2700 (LWP 18000) "mysqld" __libc_pread64 (fd=4, buf=0x7fffe81d0000, count=65536, offset=0) at ../sysdeps/unix/sysv/linux/pread64.c:29
    4 Thread 0x7fffe8f49700 (LWP 18269) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42eda10)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    5 Thread 0x7fffe3b32700 (LWP 18270) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edab0)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    6 Thread 0x7fffe3331700 (LWP 18271) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edb50)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    7 Thread 0x7fffe2b30700 (LWP 18272) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edbf0)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    8 Thread 0x7fffe232f700 (LWP 18273) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edc90)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    9 Thread 0x7fffe1b2e700 (LWP 18274) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edd30)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    10 Thread 0x7fffe132d700 (LWP 18275) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42eddd0)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    11 Thread 0x7fffe0b2c700 (LWP 18276) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42ede70)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    12 Thread 0x7fffd3d5f700 (LWP 18277) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edf10)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    13 Thread 0x7fffd355e700 (LWP 18278) "mysqld" 0x00007ffff7bc1ad3 in futex_wait_cancelable (private=<optimized out>, expected=0, futex_word=0x7fffe42edfb0)
    at ../sysdeps/unix/sysv/linux/futex-internal.h:88
    14 Thread 0x7fffd2d5d700 (LWP 18279) "mysqld" 0x00007ffff6ace280 in operator new(unsigned long) () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6

Further reading


Preliminary definitions

The set of primes

Proof of RSA

M is an arbitrary integer, which needs to satisfy

We want to prove

where
Euler's totient function

It is defined as the number of positive integers less than or equal to n that are coprime to n.
In particular, if x is prime, the totient evaluates to x - 1.

Now let us prove the right-hand part of the formula, namely:

- When M is divisible by n, i.e. M ≡ 0 (mod n), the claim holds (then M^(ed) ≡ 0 ≡ M (mod n)).
Further reading


Three-valued logic

Three-valued predicates:
a predicate evaluates to one of the three elements {T, F, U}, i.e. true, false, and unknown.

Evaluation and predicates

SELECT 1=NULL
In MySQL this returns one row, and the value in that row is NULL.

① Per the standard, the WHERE, CASE and HAVING clauses only keep the value true out of the three truth values.

SELECT 1 NOT IN (1,  NULL)    ## false, so the row is not selected by the condition
SELECT 1 NOT IN ( NULL) ## null; by rule ① above it is not selected either
So when NOT IN is used in a WHERE clause and the NOT IN list contains NULL, no rows come back.

EXISTS: the standard defines EXISTS as a two-valued function, so it must map to true or false; in MySQL it is true as soon as one row is returned.
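A small sketch of the three-valued truth tables using None for unknown (my own illustration; SQL engines implement the same Kleene logic internally):

# Three-valued (Kleene) logic with None standing for UNKNOWN.
def not3(a):
    return None if a is None else (not a)

def and3(a, b):
    if a is False or b is False:
        return False
    if a is None or b is None:
        return None
    return True

def or3(a, b):
    if a is True or b is True:
        return True
    if a is None or b is None:
        return None
    return False

# 1 NOT IN (1, NULL)  ==  NOT (1=1 OR 1=NULL)  ==  NOT (TRUE OR UNKNOWN)  ==  FALSE
print(not3(or3(True, None)))   # False -> row filtered out
# 1 NOT IN (NULL)     ==  NOT (1=NULL)          ==  NOT UNKNOWN            ==  UNKNOWN
print(not3(None))              # None  -> also filtered out by rule ①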


rule

judgment

Syntax

Syntax is a structure built out of tokens.

Semantics

Semantics is what a given syntactic structure maps to.

For example, operational semantics: a piece of syntax maps to the operations it performs.

Denotational semantics: syntax maps to a mathematical mapping (a function).
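A tiny illustration of "syntax maps to operations" (a sketch of the idea only): a toy expression syntax and an eval function that gives each syntactic form its operational meaning.

# Syntax: nested tuples, e.g. ('add', ('num', 1), ('num', 2))
def evaluate(node):
    """Operational semantics: each syntactic form is mapped to an operation."""
    tag = node[0]
    if tag == 'num':     # literal -> its value
        return node[1]
    if tag == 'add':     # ('add', e1, e2) -> evaluate both sides, then add
        return evaluate(node[1]) + evaluate(node[2])
    if tag == 'mul':
        return evaluate(node[1]) * evaluate(node[2])
    raise ValueError(f"unknown syntax: {tag}")

print(evaluate(('add', ('num', 1), ('mul', ('num', 2), ('num', 3)))))  # 7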

Further reading


Predicate pushdown:

Why is predicate pushdown possible?

Because of the commutative and associative laws: the selection can be reordered around the join without changing the result (see the sketch below).
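A quick sanity check of that claim, as an illustration with plain Python lists rather than a real optimizer: filtering before the join gives the same result as filtering after, as long as the predicate only touches one side's columns.

orders = [{"id": 1, "uid": 1, "amt": 5}, {"id": 2, "uid": 2, "amt": 50}]
users  = [{"uid": 1, "name": "alice"}, {"uid": 2, "name": "bob"}]

def join(a, b, key):
    return [{**x, **y} for x in a for y in b if x[key] == y[key]]

pred = lambda row: row["amt"] > 10            # predicate only on orders' columns

after  = [r for r in join(orders, users, "uid") if pred(r)]   # filter after the join
before = join([o for o in orders if pred(o)], users, "uid")   # push the filter down
assert after == before
print(before)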

How is it pushed down, i.e. what triggers it?

rule instance

Semantics


How do we guarantee message reliability?

Premise:
every message is split into small chunks.

How do we make sure no part of a message is lost?

Map each chunk to an id; as long as every id is present, the message is necessarily complete (nothing lost), because the ids are totally ordered and the content each id maps to is itself not lost.
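A minimal sketch of that idea (the chunk format is made up for illustration): number each chunk, and on the receiving side check that the id sequence has no holes.

def missing_ids(chunks, total):
    """chunks: iterable of (seq_id, payload); ids are 0..total-1 by convention."""
    seen = {seq for seq, _ in chunks}
    return sorted(set(range(total)) - seen)

received = [(0, b"he"), (1, b"ll"), (3, b"o!")]   # chunk 2 got lost
print(missing_ids(received, total=4))             # [2] -> ask the sender to resend it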


For the case without generics, for example:

max(a : int , b : int) {
    xxx
}

The parameters a and b are both constrained to the type int, so the function's constraint is max(int, int).
The semantics of generics is:

    max(a :<T> , b:<T>){

    }
What is the constraint now?

The constraint becomes
max(<T>, <T>)

Effect:
our constraint has become more general; in compiler terms (or in terms of the solution space), there are more admissible mappings (see the sketch below).
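The same idea written with Python's typing generics, as one possible rendering of the pseudocode above: the constraint is no longer max(int, int) but max(T, T) for a single type T.

from typing import TypeVar

T = TypeVar("T")          # the constraint is now max(T, T): any single type T

def maximum(a: T, b: T) -> T:
    return a if a >= b else b

print(maximum(3, 5))          # T = int
print(maximum("ab", "cd"))    # T = str -> many more mappings are now admissible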

The role of generics is much like class inheritance: inheritance is both a strength and a weakness.

The generic constraint likewise has pros and cons: its strength is that it gets closer to untyped code, and the matching weakness is that people tend to abuse that untyped looseness.

Just like inheritance: a lot of it is unnecessary, and refactoring an inheritance hierarchy is very hard.

Ultimately, if we build a general-purpose wheel and many places use it, then every time the wheel changes we have to ask whether all of the dependent code still works.

If the wheel is shared in only a few places, there is far less compatibility to worry about.

So abstraction comes with the drawback of frequent, rippling changes.


Are structure and properties isomorphic?

Are constraints and structure isomorphic?

I have always felt that the reason business code is so hard to maintain is that our constraints are not isomorphic to the business: one function after another passes values along, and most of the time it works fine, but some accumulated mismatch, propagated far enough, eventually gets triggered.

Why is a large codebase hard to change?
Because after all that passing around, too many values are depended on. A function starts out as (a, b) -> (c), depending only on a and b.

A few days later we change a's dependencies so that a depends on d and e: (d, e) -> (a).

Keep iterating like this and eventually nobody can untangle the dependencies.


    tree

    A free tree T is an undirected graph that is connected and acyclic.

A free tree has three properties:

- undirected
- connected
- acyclic
Definition

There are two parameters in total: k and h.

Properties

1. Every leaf has the same height.
2.1 Apart from the leaves and the root, every node has at least k+1 children.
2.2 The root is either a leaf or has at least two children.
3. Every node has at most 2k+1 children (a small checker is sketched below).
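A small checker for properties 1 and 3 on a toy node structure (my own sketch, independent of any particular B-tree implementation; k is the parameter from the definition above):

class Node:
    def __init__(self, keys, children=None):
        self.keys = keys
        self.children = children or []   # empty -> leaf

def check(node, k, depth=0, leaf_depths=None):
    """Collect leaf depths and check the child-count bound of every internal node."""
    if leaf_depths is None:
        leaf_depths = []
    if not node.children:
        leaf_depths.append(depth)                    # property 1: gather leaf depths
    else:
        assert len(node.children) <= 2 * k + 1       # property 3: at most 2k+1 children
        for child in node.children:
            check(child, k, depth + 1, leaf_depths)
    if depth == 0:
        assert len(set(leaf_depths)) == 1            # property 1: all leaves at the same height
    return leaf_depths

root = Node([10], [Node([3, 7]), Node([15, 20])])
print(check(root, k=1))   # [1, 1] -> both leaves at the same depth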

Core property

v ∈ Parent(p): … < a+1

Insertion

There are a couple of insertion strategies:
1. insert into the same page as the first node greater than it,
2. i.e. the same page as the smallest node that is greater than it.

Deletion

Lookup


The MVCC paper
http://www.cs.cmu.edu/~pavlo/papers/p781-wu.pdf

Related discussion on Zhihu:
https://zhuanlan.zhihu.com/p/45734268

What is MVCC?

MVCC is multi-version concurrency control.

Mathematical background:

- Partial order: only some pairs of elements are comparable.
- Total order: every pair of elements is comparable.

complete multiversion history

A complete MV history satisfies the following properties:

- …
- for each and all operations in …, if …, then …

Further reading


I have never studied mathematical logic, but it feels a lot like the front end of a compiler.


1 compilers: SSA
2 TensorFlow
3 how Vue works
4 NLP parsers
5 search in ES

2020-11-17
I feel like nothing gets away from compilers and mathematical logic; is that just my imagination?


Maximum spanning tree

Initialization:
put in a single node.

Loop invariant:
the set built so far is a subset of some maximum spanning tree.

Add the maximum-weight crossing edge (and its node) to the set, and update the maximum.

Why it works

If the edge we add were not part of a maximum spanning tree, the total weight would come out smaller; an exchange argument shows that swapping the heavier crossing edge in for the lighter one it replaces never decreases the total.
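A runnable sketch of that greedy procedure (Prim's algorithm flipped to pick the heaviest crossing edge; the graph is made up for illustration):

import heapq

def max_spanning_tree(graph, start):
    """graph: {node: [(weight, neighbor), ...]}. Greedily grow the tree by the
    heaviest edge crossing the cut (max-heap via negated weights)."""
    in_tree = {start}
    heap = [(-w, start, v) for w, v in graph[start]]
    heapq.heapify(heap)
    tree = []
    while heap and len(in_tree) < len(graph):
        neg_w, u, v = heapq.heappop(heap)
        if v in in_tree:
            continue
        in_tree.add(v)
        tree.append((u, v, -neg_w))
        for w, nxt in graph[v]:
            if nxt not in in_tree:
                heapq.heappush(heap, (-w, v, nxt))
    return tree

g = {
    'a': [(3, 'b'), (1, 'c')],
    'b': [(3, 'a'), (5, 'c')],
    'c': [(1, 'a'), (5, 'b')],
}
print(max_spanning_tree(g, 'a'))   # [('a', 'b', 3), ('b', 'c', 5)]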

MLP

Training


A perceptron is a classifier whose output is one of two values, +1 and -1.

Multi-layer perceptron:
input: the output of the previous layer
processing: the perceptron (activation) function
output: the value produced by that function


Control-flow graph

Entry and exit

entry --->  test ---> exit
             |  |
             |  |
             |__|
Every test node has two kinds of entry: the first entry, and the subsequent (loop-back) entries.
Every test node has two exits: exit the loop, or continue the loop.

1. The first entry satisfies the assertion.
2. Every time the test continues the loop, the assertion still holds.

We can conclude:
the exit necessarily satisfies the assertion.

- the test does not change the assertion
- every entry satisfies the assertion
from which it follows that the exit satisfies the assertion
How to prove this formally

How do we prove it, or abstract the proof, or find an isomorphic problem?

The core of a loop invariant is that it satisfies these constraints:
1. the assertion holds after initialization;
2. the assertion holds after every iteration;
3. the loop terminates (it is not an infinite loop).

Condition 3 is only there to rule out non-termination; the core conditions are 1 and 2.
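A concrete instance of conditions 1-3 (a trivial sum loop; the asserts play the role of the annotation at the loop head):

def sum_upto(n: int) -> int:
    """Invariant: total == 0 + 1 + ... + (i - 1) at the top of every iteration."""
    total, i = 0, 0
    assert total == sum(range(i))          # 1) holds after initialization
    while i < n:                           # 3) i increases, so the loop terminates
        total += i
        i += 1
        assert total == sum(range(i))      # 2) still holds after each iteration
    # exit: invariant + negated test  =>  total == 0 + 1 + ... + (n - 1)
    return total

assert sum_upto(10) == sum(range(10))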


Me, who understands nothing about NLP.

Gibbs distribution


    Hammersley-Clifford_Theorem

Loss function


    crf

How the parameters are computed

A CRF has a huge number of parameters; how do we estimate them?
By maximum likelihood estimation.

What is the essence of the maximum likelihood function?

The essence: given the observed empirical data, we assume the data we observed is the most probable outcome; the parameters under which that probability is maximal are the parameters we want.
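A toy numeric illustration of that statement (coin flips, not CRF-specific): among candidate parameters, the one that makes the observed data most probable is exactly the empirical frequency.

import math

data = [1, 1, 0, 1, 0, 1, 1, 0, 1, 1]      # observed flips: 7 heads out of 10

def log_likelihood(p, xs):
    return sum(math.log(p if x else 1 - p) for x in xs)

candidates = [i / 100 for i in range(1, 100)]
best = max(candidates, key=lambda p: log_likelihood(p, data))
print(best)                                 # 0.7 == heads / n, the MLE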

Quasi-Newton methods

Labeling scheme

Function

    Y = { B , M , E , S}
    X = {今,天,天,气,真, 热}
Linear CRF parameter estimation

How to train

- Labeling
  How do we label the data?
  For example, take two sentences from the People's Daily corpus:
// sentence 1
全总/j 致/v 全国/n 各族/r 职工/n 慰问信/n
// sentence 2
勉励/v 广大/b 职工/n 发挥/v 工人阶级/n 主力军/n 作用/n ,/w 为/p 企业/n 改革/vn 发展/vn 建功立业/l

How do we label them?

Here is an example:
// sentence 1
全/B 总/E 致/S 全/B 国/E 各/B 族/E 职/B 工/E 慰/B 问/M 信/E
// sentence 2
勉/B 励/E 广/B 大/E 职/B 工/E 发/B 挥/E 工/B 人/M 阶/M 级/E 主/B 力/M 军/E 作/B 用/E ,/S 为/S 企/B 业/E 改/B 革/E 发/B 展/E 建/B 功/M 立/M 业/E
Parameter estimation


Word segmentation

What is word segmentation? It is a classification problem, usually deciding whether to split based on weights. The machine cannot actually read the text, so it is just a weight-based split.

Segmentation happens in two places: when writing a doc, and when handling a query.

In Lucene the stack generally looks like this; the final call is the incrementToken interface.

    incrementToken:147, StandardTokenizer (org.apache.lucene.analysis.standard)
    incrementToken:37, LowerCaseFilter (org.apache.lucene.analysis)
    incrementToken:51, FilteringTokenFilter (org.apache.lucene.analysis)
    fillCache:91, CachingTokenFilter (org.apache.lucene.analysis)
    incrementToken:70, CachingTokenFilter (org.apache.lucene.analysis)
    createFieldQuery:318, QueryBuilder (org.apache.lucene.util)
    createFieldQuery:257, QueryBuilder (org.apache.lucene.util)
    newFieldQuery:468, QueryParserBase (org.apache.lucene.queryparser.classic)
    getFieldQuery:457, QueryParserBase (org.apache.lucene.queryparser.classic)
    handleBareTokenQuery:824, QueryParserBase (org.apache.lucene.queryparser.classic)
    Term:494, QueryParser (org.apache.lucene.queryparser.classic)
    Clause:366, QueryParser (org.apache.lucene.queryparser.classic)
    Query:251, QueryParser (org.apache.lucene.queryparser.classic)
    TopLevelQuery:223, QueryParser (org.apache.lucene.queryparser.classic)
    parse:136, QueryParserBase (org.apache.lucene.queryparser.classic)
Search

The principle of search is inverted index + weights: take the top few results by weight, so it too can be seen as a weight-based classification/ranking problem.
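A minimal sketch of "inverted index + weight" (illustrative only; Lucene's actual scoring is BM25):

import math
from collections import defaultdict

docs = {1: "the cat sat", 2: "the dog sat", 3: "cat and cat"}

# Build the inverted index: term -> {doc_id: term frequency}
index = defaultdict(dict)
for doc_id, text in docs.items():
    for term in text.split():
        index[term][doc_id] = index[term].get(doc_id, 0) + 1

def search(query):
    scores = defaultdict(float)
    for term in query.split():
        postings = index.get(term, {})
        if not postings:
            continue
        idf = math.log(1 + len(docs) / len(postings))     # rarer term -> higher weight
        for doc_id, tf in postings.items():
            scores[doc_id] += tf * idf
    return sorted(scores.items(), key=lambda kv: -kv[1])  # top results by score

print(search("cat"))   # doc 3 (tf=2) ranks above doc 1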

High availability

    // todo

Redundancy

    // todo

Failover

    // todo

Lucene

The Lucene source ships with simple examples; it mainly breaks down into three parts.

- 1 indexing
- 1.1 tokenization
- 2 storage
- Lucene has a lot of classes, but abstracting this part as "storage" seems fair; I have not read it closely

- 3 search
- 3.1 computing weights (usually tf-idf)
I have not looked carefully at the ES side yet, but going by my compilers intuition, ES adds a parsing layer on top and translates it into the corresponding operations.

Further reading


Background

- JDK version: JDK 11

We used to call an internal PHP product-info service; the call was switched to a Java product service, but the Java code mishandles a lot of edge cases, which led to an OOM.

Symptoms

CPU spiked to 100%, memory spiked to 100%, and then the process simply died.

Investigation

Starting the investigation

Logs: they show an OOM.

At first there was no heap dump file, so add the dump options:
    -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/admin/logs/jvmlogs/java.hprof
Then analyze the dump: there was a ~400 MB and a ~100 MB object in a heap of only about 1 GB, so those two objects alone already accounted for more than 50% of the heap.

The final confirmation: a bad SQL statement was pulling the entire table into memory.


Background

A production service used to have the request path
PHP -> NGINX -> PHP
and had to be migrated to
PHP -> NGINX -> JAVA, i.e. the upstream being called switches from PHP to Java.

Investigation

Reproducing the error

After going live, PHP requests to the Java interface failed with cURL error 18: transfer closed with outstanding read data remaining; digging further, the Java side showed a socket reset.

Investigating the Java error

The Java logs in production showed RST errors. The request took less than a second but was reset, which rules out a slow interface hitting a timeout.

    org.apache.catalina.connector.ClientAbortException: java.io.IOException: Connection reset by peer

    at org.apache.catalina.connector.OutputBuffer.realWriteBytes(OutputBuffer.java:353)

    at org.apache.catalina.connector.OutputBuffer.flushByteBuffer(OutputBuffer.java:783)

    at org.apache.catalina.connector.OutputBuffer.append(OutputBuffer.java:688)

    at org.apache.catalina.connector.OutputBuffer.writeBytes(OutputBuffer.java:388)

    at org.apache.catalina.connector.OutputBuffer.write(OutputBuffer.java:366)

    at org.apache.catalina.connector.CoyoteOutputStream.write(CoyoteOutputStream.java:96)

    at org.springframework.util.StreamUtils$NonClosingOutputStream.write(StreamUtils.java:287)

    at com.fasterxml.jackson.core.json.UTF8JsonGenerator._flushBuffer(UTF8JsonGenerator.java:2161)

    at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegment2(UTF8JsonGenerator.java:1476)

    at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegment(UTF8JsonGenerator.java:1423)

    at com.fasterxml.jackson.core.json.UTF8JsonGenerator._writeStringSegments(UTF8JsonGenerator.java:1306)

    at com.fasterxml.jackson.core.json.UTF8JsonGenerator.writeString(UTF8JsonGenerator.java:502)

    at com.fasterxml.jackson.databind.ser.std.StringSerializer.serialize(StringSerializer.java:41)

    at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

    at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

    at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

    at com.fasterxml.jackson.databind.ser.std.MapSerializer.serializeFields(MapSerializer.java:726)

    at com.fasterxml.jackson.databind.ser.std.MapSerializer.serializeWithoutTypeInfo(MapSerializer.java:681)

    at com.fasterxml.jackson.databind.ser.std.MapSerializer.serialize(MapSerializer.java:637)

    at com.fasterxml.jackson.databind.ser.std.MapSerializer.serialize(MapSerializer.java:33)

    at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

    at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

    at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

    at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serializeContents(IndexedListSerializer.java:119)

    at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serialize(IndexedListSerializer.java:79)

    at com.fasterxml.jackson.databind.ser.impl.IndexedListSerializer.serialize(IndexedListSerializer.java:18)

    at com.fasterxml.jackson.databind.ser.BeanPropertyWriter.serializeAsField(BeanPropertyWriter.java:728)

    at com.fasterxml.jackson.databind.ser.std.BeanSerializerBase.serializeFields(BeanSerializerBase.java:755)

    at com.fasterxml.jackson.databind.ser.BeanSerializer.serialize(BeanSerializer.java:178)

    at com.fasterxml.jackson.databind.ser.DefaultSerializerProvider._serialize(DefaultSerializerProvider.java:480)

    at com.fasterxml.jackson.databind.ser.DefaultSerializerProvider.serializeValue(DefaultSerializerProvider.java:319)

    at com.fasterxml.jackson.databind.ObjectWriter$Prefetch.serialize(ObjectWriter.java:1516)

    at com.fasterxml.jackson.databind.ObjectWriter.writeValue(ObjectWriter.java:1006)

    at org.springframework.http.converter.json.AbstractJackson2HttpMessageConverter.writeInternal(AbstractJackson2HttpMessageConverter.java:346)

    at org.springframework.http.converter.AbstractGenericHttpMessageConverter.write(AbstractGenericHttpMessageConverter.java:104)

    at org.springframework.web.servlet.mvc.method.annotation.AbstractMessageConverterMethodProcessor.writeWithMessageConverters(AbstractMessageConverterMethodProcessor.java:277)

    at org.springframework.web.servlet.mvc.method.annotation.RequestResponseBodyMethodProcessor.handleReturnValue(RequestResponseBodyMethodProcessor.java:181)

    at org.springframework.web.method.support.HandlerMethodReturnValueHandlerComposite.handleReturnValue(HandlerMethodReturnValueHandlerComposite.java:82)

    at org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:123)

    at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:878)

    at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:792)

    at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87)

    at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040)

    at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943)

    at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006)

    at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909)

    at javax.servlet.http.HttpServlet.service(HttpServlet.java:652)

    at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883)

    at javax.servlet.http.HttpServlet.service(HttpServlet.java:733)

    at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:227)

    at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

    at org.apache.tomcat.websocket.server.WsFilter.doFilter(WsFilter.java:53)

    at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189)

    at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

    at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201)

    at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119)

    at org.apache.catalina.core.ApplicationFilterChain.internalDoFilter(ApplicationFilterChain.java:189)

    at org.apache.catalina.core.ApplicationFilterChain.doFilter(ApplicationFilterChain.java:162)

    at org.apache.catalina.core.StandardWrapperValve.invoke(StandardWrapperValve.java:202)

    at org.apache.catalina.core.StandardContextValve.invoke(StandardContextValve.java:97)

    at org.apache.catalina.authenticator.AuthenticatorBase.invoke(AuthenticatorBase.java:542)

    at org.apache.catalina.core.StandardHostValve.invoke(StandardHostValve.java:143)

    at org.apache.catalina.valves.ErrorReportValve.invoke(ErrorReportValve.java:92)

    at org.apache.catalina.core.StandardEngineValve.invoke(StandardEngineValve.java:78)

    at org.apache.catalina.connector.CoyoteAdapter.service(CoyoteAdapter.java:357)

    at org.apache.coyote.http11.Http11Processor.service(Http11Processor.java:374)

    at org.apache.coyote.AbstractProcessorLight.process(AbstractProcessorLight.java:65)

    at org.apache.coyote.AbstractProtocol$ConnectionHandler.process(AbstractProtocol.java:893)

    at org.apache.tomcat.util.net.NioEndpoint$SocketProcessor.doRun(NioEndpoint.java:1707)

    at org.apache.tomcat.util.net.SocketProcessorBase.run(SocketProcessorBase.java:49)

    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)

    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)

    at org.apache.tomcat.util.threads.TaskThread$WrappingRunnable.run(TaskThread.java:61)

    at java.base/java.lang.Thread.run(Thread.java:834)

    Caused by: java.io.IOException: Connection reset by peer

    at java.base/sun.nio.ch.FileDispatcherImpl.write0(Native Method)

    at java.base/sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:47)

    at java.base/sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:113)

    at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:79)

    at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:50)

    at java.base/sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:466)

    at org.apache.tomcat.util.net.NioChannel.write(NioChannel.java:135)

    at org.apache.tomcat.util.net.NioBlockingSelector.write(NioBlockingSelector.java:118)

    at org.apache.tomcat.util.net.NioSelectorPool.write(NioSelectorPool.java:151)

    at org.apache.tomcat.util.net.NioEndpoint$NioSocketWrapper.doWrite(NioEndpoint.java:1367)

    at org.apache.tomcat.util.net.SocketWrapperBase.doWrite(SocketWrapperBase.java:766)

    at org.apache.tomcat.util.net.SocketWrapperBase.writeBlocking(SocketWrapperBase.java:586)

    at org.apache.tomcat.util.net.SocketWrapperBase.write(SocketWrapperBase.java:530)

    at org.apache.coyote.http11.Http11OutputBuffer$SocketOutputBuffer.doWrite(Http11OutputBuffer.java:546)

    at org.apache.coyote.http11.filters.IdentityOutputFilter.doWrite(IdentityOutputFilter.java:84)

    at org.apache.coyote.http11.Http11OutputBuffer.doWrite(Http11OutputBuffer.java:193)

    at org.apache.coyote.Response.doWrite(Response.java:606)

    at org.apache.catalina.connector.OutputBuffer.realWriteBytes(OutputBuffer.java:340)

    ... 73 common frames omitted

Checking the nginx logs

proxy_temp permission denied - so there is the problem: nginx did not have permission to create the proxy_temp file.

Cause: each request is given one page of buffer. If the response exceeds a page (8 KB/16 KB), the rest is spilled into a proxy_temp file; since nginx could not create that file, the request was simply cut off.

Fix

Current approach:
give the nginx user/group permission to create the proxy_temp files.

Alternative:
if you would rather not write to disk because of the performance cost, you can tune

- proxy_max_temp_file_size
- proxy_buffer_size
- proxy_buffers
    When buffering is enabled, nginx receives a response from the proxied server as soon as possible, saving it into the buffers set by the proxy_buffer_size and proxy_buffers directives. If the whole response does not fit into memory, a part of it can be saved to a temporary file on the disk. Writing to temporary files is controlled by the proxy_max_temp_file_size and proxy_temp_file_write_size directives.
After the fix went live:

Further reading


Background

Getting familiar with Elasticsearch.

Build
    ./gradlew localDistro
- Step 1: turn off the security checks; my local setup is plain http, so https is not needed.
  ### set this option in config/elasticsearch.yml to false to disable the https/security checks
  # Enable security features
  xpack.security.enabled: false

- Modify the code:
  elasticsearch/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/ServerProcess.java
  add the jdwp option:
  // also pass through distribution type
  jvmOptions.add("-Des.distribution.type=" + processInfo.sysprops().get("es.distribution.type"));
  jvmOptions.add("-agentlib:jdwp=transport=dt_socket,server=y,address=9999"); // add this line so jdb can attach
- Run elasticsearch; it will now block during startup, waiting for jdb to attach.
    ./elasticsearch
- Debug with jdb.

After pulling the code with gradle, you also need the Lucene sources, unpacked into the /home/ubuntu/lucene directory:

    jar -xf /home/ubuntu/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-core/9.7.0/35359f1763c9d7a0f04188c4933311be3c07b60e/lucene-core-9.7.0-sources.jar
## here /home/dai/ is where I put elasticsearch; /home/ubuntu/.gradle/caches/modules-2/files-2.1/org.apache.lucene/lucene-core/9.7.0/35359f1763c9d7a0f04188c4933311be3c07b60e/ is gradle's default download path on my machine - use find to locate yours
    jdb -attach 9999 -sourcepath /home/ubuntu/elasticsearch/distribution/tools/cli-launcher/src/main/java/:/home/ubuntu/elasticsearch/server/src/main/java/:/home/ubuntu/lucene/
- Breakpoint:
  stop in org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest

Stacks

Debugging with jdb:

The breakpoint is: stop in org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest

    Breakpoint hit: 
    Breakpoint hit: "thread=elasticsearch[myhost][transport_worker][T#5]", org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest(), line=100 bci=0
    100 if (request.hasParam("min_compatible_shard_node")) {

    elasticsearch[myhost][transport_worker][T#5][1] where
    [1] org.elasticsearch.rest.action.search.RestSearchAction.prepareRequest (RestSearchAction.java:100)
    [2] org.elasticsearch.rest.BaseRestHandler.handleRequest (BaseRestHandler.java:80)
    [3] org.elasticsearch.xpack.security.rest.SecurityRestFilter.doHandleRequest (SecurityRestFilter.java:96)
    [4] org.elasticsearch.xpack.security.rest.SecurityRestFilter.handleRequest (SecurityRestFilter.java:76)
    [5] org.elasticsearch.rest.RestController.dispatchRequest (RestController.java:414)
    [6] org.elasticsearch.rest.RestController.tryAllHandlers (RestController.java:543)
    [7] org.elasticsearch.rest.RestController.dispatchRequest (RestController.java:316)
    [8] org.elasticsearch.http.AbstractHttpServerTransport.dispatchRequest (AbstractHttpServerTransport.java:453)
    [9] org.elasticsearch.http.AbstractHttpServerTransport.handleIncomingRequest (AbstractHttpServerTransport.java:549)
    [10] org.elasticsearch.http.AbstractHttpServerTransport.incomingRequest (AbstractHttpServerTransport.java:426)
    [11] org.elasticsearch.http.netty4.Netty4HttpPipeliningHandler.handlePipelinedRequest (Netty4HttpPipeliningHandler.java:128)
    [12] org.elasticsearch.http.netty4.Netty4HttpPipeliningHandler.channelRead (Netty4HttpPipeliningHandler.java:118)
    [13] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:442)
    [14] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [15] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [16] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
    [17] io.netty.handler.codec.MessageToMessageCodec.channelRead (MessageToMessageCodec.java:111)
    [18] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:442)
    [19] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [20] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [21] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
    [22] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
    [23] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [24] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [25] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
    [26] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
    [27] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [28] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [29] io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead (ByteToMessageDecoder.java:346)
    [30] io.netty.handler.codec.ByteToMessageDecoder.channelRead (ByteToMessageDecoder.java:318)
    [31] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
    [32] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [33] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [34] io.netty.handler.codec.MessageToMessageDecoder.channelRead (MessageToMessageDecoder.java:103)
    [35] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:444)
    [36] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [37] io.netty.channel.AbstractChannelHandlerContext.fireChannelRead (AbstractChannelHandlerContext.java:412)
    [38] io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead (DefaultChannelPipeline.java:1,410)
    [39] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:440)
    [40] io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead (AbstractChannelHandlerContext.java:420)
    [41] io.netty.channel.DefaultChannelPipeline.fireChannelRead (DefaultChannelPipeline.java:919)
    [42] io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read (AbstractNioByteChannel.java:166)
    [43] io.netty.channel.nio.NioEventLoop.processSelectedKey (NioEventLoop.java:788)
    [44] io.netty.channel.nio.NioEventLoop.processSelectedKeysPlain (NioEventLoop.java:689)
    [45] io.netty.channel.nio.NioEventLoop.processSelectedKeys (NioEventLoop.java:652)
    [46] io.netty.channel.nio.NioEventLoop.run (NioEventLoop.java:562)
    [47] io.netty.util.concurrent.SingleThreadEventExecutor$4.run (SingleThreadEventExecutor.java:997)
    [48] io.netty.util.internal.ThreadExecutorMap$2.run (ThreadExecutorMap.java:74)
    [49] java.lang.Thread.runWith (Thread.java:1,636)
    [50] java.lang.Thread.run (Thread.java:1,623)

    query phase

    elasticsearch[myhost][search][T#7][1] where
    [1] org.elasticsearch.search.query.QueryPhase.execute (QueryPhase.java:62)
    [2] org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase (SearchService.java:516)
    [3] org.elasticsearch.search.SearchService.executeQueryPhase (SearchService.java:668)
    [4] org.elasticsearch.search.SearchService.lambda$executeQueryPhase$2 (SearchService.java:541)
    [5] org.elasticsearch.search.SearchService$$Lambda$7604/0x00007fceb1297320.get (null)
    [6] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:51)
    [7] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:48)
    [8] org.elasticsearch.action.ActionRunnable$3.doRun (ActionRunnable.java:73)
    [9] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
    [10] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
    [11] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
    [12] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
    [13] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
    [14] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
    [15] java.lang.Thread.runWith (Thread.java:1,636)
    [16] java.lang.Thread.run (Thread.java:1,623)
    elasticsearch[myhost][search][T#7][1]
Inspecting the reader

    Breakpoint hit: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=150 bci=0
    150 final ContextIndexSearcher searcher = searchContext.searcher();

    elasticsearch[myhost][search][T#7][1] next
    >
    Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=151 bci=5
    151 final IndexReader reader = searcher.getIndexReader();

    elasticsearch[myhost][search][T#7][1] next
    >
    Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch(), line=152 bci=10
    152 QuerySearchResult queryResult = searchContext.queryResult();

    elasticsearch[myhost][search][T#7][1] print reader
    reader = "ExitableDirectoryReader(FilterLeafReader(FieldUsageTrackingLeafReader(reader=FilterLeafReader(_0(9.7.0):c1:[diagnostics={timestamp=1694357055349, source=flush, lucene.version=9.7.0, os.version=6.2.0-31-generic, os.arch=amd64, os=Linux, java.vendor=Oracle Corporation, java.runtime.version=20.0.2+9-78}]:[attributes={Lucene90StoredFieldsFormat.mode=BEST_SPEED}] :id=bv125vla2ovjxnipt5j9ssmby))))"
    elasticsearch[myhost][search][T#7][1]
    elasticsearch[myhost][search][T#7][1] print query
    query = "age:38"
    elasticsearch[myhost][search][T#7][1] step in
    >
    Step completed: "thread=elasticsearch[myhost][search][T#7]", org.elasticsearch.search.internal.ContextIndexSearcher.search(), line=340 bci=0
    340 final C firstCollector = collectorManager.newCollector();

    elasticsearch[myhost][search][T#7][1] where
    [1] org.elasticsearch.search.internal.ContextIndexSearcher.search (ContextIndexSearcher.java:340)
    [2] org.elasticsearch.search.query.QueryPhase.addCollectorsAndSearch (QueryPhase.java:206)
    [3] org.elasticsearch.search.query.QueryPhase.executeQuery (QueryPhase.java:134)
    [4] org.elasticsearch.search.query.QueryPhase.execute (QueryPhase.java:63)
    [5] org.elasticsearch.search.SearchService.loadOrExecuteQueryPhase (SearchService.java:516)
    [6] org.elasticsearch.search.SearchService.executeQueryPhase (SearchService.java:668)
    [7] org.elasticsearch.search.SearchService.lambda$executeQueryPhase$2 (SearchService.java:541)
    [8] org.elasticsearch.search.SearchService$$Lambda$7604/0x00007fceb1297320.get (null)
    [9] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:51)
    [10] org.elasticsearch.action.ActionRunnable$2.accept (ActionRunnable.java:48)
    [11] org.elasticsearch.action.ActionRunnable$3.doRun (ActionRunnable.java:73)
    [12] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
    [13] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
    [14] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
    [15] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
    [16] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
    [17] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
    [18] java.lang.Thread.runWith (Thread.java:1,636)
    [19] java.lang.Thread.run (Thread.java:1,623)

bulkScorer and Scorer

The core now comes down to these two functions:

- bulkScorer
- scorer
    BulkScorer bulkScorer = weight.bulkScorer(ctx);
    if (bulkScorer != null) {
    if (cancellable.isEnabled()) {
    bulkScorer = new CancellableBulkScorer(bulkScorer, cancellable::checkCancelled);
    }
    try {
    bulkScorer.score(leafCollector, liveDocs);
    } catch (CollectionTerminatedException e) {
    ...
    }
The weight here is:

    elasticsearch[ubuntu-Vostro-3690][search_worker][T#2][1] dump weight
    weight = {
    similarity: instance of org.elasticsearch.index.similarity.SimilarityService$PerFieldSimilarity(id=25223)
    simScorer: instance of org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer(id=25224)
    termStates: instance of org.apache.lucene.index.TermStates(id=25225)
    scoreMode: instance of org.apache.lucene.search.ScoreMode(id=25226)
    $assertionsDisabled: true
    this$0: instance of org.apache.lucene.search.TermQuery(id=25227)
    org.apache.lucene.search.Weight.parentQuery: instance of org.apache.lucene.search.TermQuery(id=25227)
    }
The search (scoring) process

    elasticsearch[myhost][search_worker][T#5][1] list
    246 // float. And then monotonicity is preserved through composition via
    247 // x -> 1 + x and x -> 1 - 1/x.
    248 // Finally we expand weight * (1 - 1 / (1 + freq * 1/norm)) to
    249 // weight - weight / (1 + freq * 1/norm), which runs slightly faster.
    250 => float normInverse = cache[((byte) encodedNorm) & 0xFF];
    251 return weight - weight / (1f + freq * normInverse);
    252 }
    253
    254 @Override
    255 public Explanation explain(Explanation freq, long encodedNorm) {
    elasticsearch[myhost][search_worker][T#5][1] where
    [1] org.apache.lucene.search.similarities.BM25Similarity$BM25Scorer.score (BM25Similarity.java:250)
    [2] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
    [3] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
    [4] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
    [5] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreRange (Weight.java:274)
    [6] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:254)
    [7] org.elasticsearch.search.internal.CancellableBulkScorer.score (CancellableBulkScorer.java:45)
    [8] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [9] org.elasticsearch.search.internal.ContextIndexSearcher.searchLeaf (ContextIndexSearcher.java:538)
    [10] org.elasticsearch.search.internal.ContextIndexSearcher.search (ContextIndexSearcher.java:480)
    [11] org.elasticsearch.search.internal.ContextIndexSearcher.lambda$search$4 (ContextIndexSearcher.java:396)
    [12] org.elasticsearch.search.internal.ContextIndexSearcher$$Lambda$7626/0x00007fceb12a4e58.call (null)
    [13] java.util.concurrent.FutureTask.run (FutureTask.java:317)
    [14] org.elasticsearch.common.util.concurrent.TimedRunnable.doRun (TimedRunnable.java:33)
    [15] org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingAbstractRunnable.doRun (ThreadContext.java:983)
    [16] org.elasticsearch.common.util.concurrent.AbstractRunnable.run (AbstractRunnable.java:26)
    [17] java.util.concurrent.ThreadPoolExecutor.runWorker (ThreadPoolExecutor.java:1,144)
    [18] java.util.concurrent.ThreadPoolExecutor$Worker.run (ThreadPoolExecutor.java:642)
    [19] java.lang.Thread.runWith (Thread.java:1,636)
    [20] java.lang.Thread.run (Thread.java:1,623)

Common Elasticsearch CRUD commands

• Write (index a document):

  curl -XPOST http://localhost:9200/test/_doc -H "Content-Type: application/json" -d  '{"name":"John Smith","age":"38"}'

• Query:

  curl -X GET  -H "Content-Type: application/json"    http://localhost:9200/test/_search -d '{"query":{"match":{"age":"38"}}}'

A Java-client equivalent is sketched below.
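For reference, here is a hedged sketch of the same two calls using the Elasticsearch low-level Java REST client (this assumes the org.elasticsearch.client:elasticsearch-rest-client dependency; the host, index and document values are the same illustrative ones as in the curl commands above):

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class EsCrudDemo {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // index (write) a document
            Request index = new Request("POST", "/test/_doc");
            index.setJsonEntity("{\"name\":\"John Smith\",\"age\":\"38\"}");
            client.performRequest(index);

            // query it back
            Request search = new Request("GET", "/test/_search");
            search.setJsonEntity("{\"query\":{\"match\":{\"age\":\"38\"}}}");
            Response response = client.performRequest(search);
            System.out.println(response.getStatusLine());
        }
    }
}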

Related reading


Background

Understand how the various branch/jump bytecodes are implemented in the template interpreter.


Annotated disassembly

    (gdb) x/30i 0x7fffe10176ab

0x7fffe10176ab: mov -0x18(%rbp),%rcx ## __ get_method(rcx); // rcx holds method; rbp-0x18 is the method address
0x7fffe10176af: mov -0x28(%rbp),%rax ## __ profile_taken_branch(rax, rbx); // rax holds updated MDP; everything up to 0x7fffe10176d0 is profile_taken_branch
    0x7fffe10176b3: test %rax,%rax
    0x7fffe10176b6: je 0x7fffe10176d4
    0x7fffe10176bc: mov 0x8(%rax),%rbx
    0x7fffe10176c0: add $0x1,%rbx
    0x7fffe10176c4: sbb $0x0,%rbx
    0x7fffe10176c8: mov %rbx,0x8(%rax)
    0x7fffe10176cc: add 0x10(%rax),%rax
    0x7fffe10176d0: mov %rax,-0x28(%rbp)
0x7fffe10176d4: movswl 0x1(%r13),%edx ## __ movl(rdx, at_bcp(1)); r13 points at the bytecode instruction currently being fetched
0x7fffe10176d9: bswap %edx ##__ bswapl(rdx);
0x7fffe10176db: sar $0x10,%edx ## __ sarl(rdx, 16);
0x7fffe10176de: movslq %edx,%rdx ## LP64_ONLY(__ movl2ptr(rdx, rdx));
0x7fffe10176e1: add %rdx,%r13 ## __ addptr(rbcp, rdx); // Adjust the bcp in r13 by the displacement in rdx
0x7fffe10176e4: test %edx,%edx ## check whether the displacement is 0; if it is 0, ZF=1 (SF reflects the sign)
0x7fffe10176e6: jns 0x7fffe10176ec ## if >= 0 (forward branch), jump to 0x7fffe10176ec
0x7fffe10176ec: mov 0x20(%rcx),%rax ## __ movptr(rax, Address(rcx, Method::method_counters_offset()));
0x7fffe10176f0: test %rax,%rax ## __ testptr(rax, rax);
0x7fffe10176f3: jne 0x7fffe10176f9 ## jump if ZF=0, i.e. rax != 0 (the counters already exist): __ jcc(Assembler::notZero, has_counters);
0x7fffe10176f9: push %rdx ## __ push(rdx);
0x7fffe10176fa: push %rcx ## __ push(rcx);
0x7fffe10176fb: call 0x7fffe1017705 ## begin of __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
rcx); the call_VM sequence runs until 0x7fffe1017828
    0x7fffe1017700: jmp 0x7fffe1017829
    0x7fffe1017705: mov %rcx,%rsi
    0x7fffe1017708: lea 0x8(%rsp),%rax
    0x7fffe101770d: mov %r13,-0x40(%rbp)
    0x7fffe1017711: cmpq $0x0,-0x10(%rbp)
    0x7fffe1017719: je 0x7fffe1017733
    0x7fffe101771f: movabs $0x7ffff71becc8,%rdi
    0x7fffe1017729: and $0xfffffffffffffff0,%rsp
    0x7fffe101772d:
    call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017732: hlt
    0x7fffe1017733: push %r10
    0x7fffe1017735:
    cmp 0x16aef7c4(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
    0x7fffe101773c: je 0x7fffe1017756
    0x7fffe1017742: movabs $0x7ffff7311c28,%rdi
    0x7fffe101774c: and $0xfffffffffffffff0,%rsp
    0x7fffe1017750:
    call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017755: hlt
    0x7fffe1017756: pop %r10
    0x7fffe1017758: mov %r15,%rdi
    0x7fffe101775b: vzeroupper
    0x7fffe101775e: mov %rbp,0x2d0(%r15)
    0x7fffe1017765: mov %rax,0x2c0(%r15)
    0x7fffe101776c: test $0xf,%esp
    0x7fffe1017772: je 0x7fffe101778a
    0x7fffe1017778: sub $0x8,%rsp
    0x7fffe101777c:
    call 0x7ffff65d4a46 <_ZN18InterpreterRuntime21build_method_countersEP10JavaThreadP6Method>
    0x7fffe1017781: add $0x8,%rsp
    0x7fffe1017785: jmp 0x7fffe101778f
    0x7fffe101778a:
    call 0x7ffff65d4a46 <_ZN18InterpreterRuntime21build_method_countersEP10JavaThreadP6Method>
    0x7fffe101778f: push %rax
    0x7fffe1017790: push %rdi
    0x7fffe1017791: push %rsi
    0x7fffe1017792: push %rdx
    0x7fffe1017793: push %rcx
    0x7fffe1017794: push %r8
    0x7fffe1017796: push %r9
    0x7fffe1017798: push %r10
    0x7fffe101779a: push %r11
    0x7fffe101779c: test $0xf,%esp
    0x7fffe10177a2: je 0x7fffe10177ba
    0x7fffe10177a8: sub $0x8,%rsp
    0x7fffe10177ac: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe10177b1: add $0x8,%rsp
    0x7fffe10177b5: jmp 0x7fffe10177bf
    0x7fffe10177ba: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe10177bf: pop %r11
    0x7fffe10177c1: pop %r10
    0x7fffe10177c3: pop %r9
    0x7fffe10177c5: pop %r8
    0x7fffe10177c7: pop %rcx
    0x7fffe10177c8: pop %rdx
    0x7fffe10177c9: pop %rsi
    0x7fffe10177ca: pop %rdi
    0x7fffe10177cb: cmp %rax,%r15
    0x7fffe10177ce: je 0x7fffe10177e8
    0x7fffe10177d4: movabs $0x7ffff7311da0,%rdi
    0x7fffe10177de: and $0xfffffffffffffff0,%rsp
    0x7fffe10177e2:
    call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe10177e7: hlt
    0x7fffe10177e8: pop %rax
    0x7fffe10177e9: movq $0x0,0x2c0(%r15)
    0x7fffe10177f4: movq $0x0,0x2d0(%r15)
    0x7fffe10177ff: movq $0x0,0x2c8(%r15)
    0x7fffe101780a: vzeroupper
    0x7fffe101780d: cmpq $0x0,0x8(%r15)
    0x7fffe1017815: je 0x7fffe1017820
    0x7fffe101781b: jmp 0x7fffe1000c20
    0x7fffe1017820: mov -0x40(%rbp),%r13
    0x7fffe1017824: mov -0x38(%rbp),%r14
    0x7fffe1017828: ret ### end __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
    rcx);
    0x7fffe1017829: pop %rcx ### __ pop(rcx);
    0x7fffe101782a: pop %rdx ## __ pop(rdx);
    0x7fffe101782b: mov 0x20(%rcx),%rax ## __ movptr(rax, Address(rcx, Method::method_counters_offset()));
    0x7fffe101782f: test %rax,%rax ## __ testptr(rax, rax);
    0x7fffe1017832: je 0x7fffe1017838 ## __ jcc(Assembler::zero, dispatch);
    0x7fffe1017838: mov 0x18(%rcx),%rbx ## __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset()))); // Are we profiling?
    0x7fffe101783c: test %rbx,%rbx ## __ testptr(rbx, rbx);
    0x7fffe101783f: je 0x7fffe1017841 ## __ jccb(Assembler::zero, no_mdo);
0x7fffe1017841: mov 0x130(%rbx),%eax ## everything up to 0x7fffe1017856 is __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
    0x7fffe1017847: add $0x2,%eax
    0x7fffe101784a: mov %eax,0x130(%rbx)
    0x7fffe1017850: and 0x144(%rbx),%eax
    0x7fffe1017856: je 0x7fffe101785c
    0x7fffe101785c: jmp 0x7fffe1017861 ## __ jmp(dispatch);
    0x7fffe1017861: mov 0x20(%rcx),%rcx ## __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
    0x7fffe1017865: mov 0x10(%rcx),%eax ### __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,rax, false, Assembler::zero, UseOnStackReplacement ? &backedge_counter_overflow : NULL);
    0x7fffe1017868: add $0x2,%eax
    0x7fffe101786b: mov %eax,0x10(%rcx)
    0x7fffe101786e: and 0x2c(%rcx),%eax
    0x7fffe1017871: je 0x7fffe1017877
    0x7fffe1017877: movzbl 0x0(%r13),%ebx ### // Pre-load the next target bytecode into rbx __ load_unsigned_byte(rbx, Address(rbcp, 0));
    0x7fffe101787c: testb $0x1,0x388(%r15) ### // continue with the bytecode @ target
    ### // rax: return bci for jsr's, unused otherwise
    ###// rbx: target bytecode
    ###// r13: target bcp
### // __ dispatch_only(vtos, true); covers 0x7fffe101787c through 0x7fffe101789c
    0x7fffe1017884: je 0x7fffe1017892
    0x7fffe1017886: movabs $0x7ffff7bd68a0,%r10
    0x7fffe1017890: jmp 0x7fffe101789c
    0x7fffe1017892: movabs $0x7ffff7bcc8a0,%r10
    0x7fffe101789c: jmp *(%r10,%rbx,8)
    0x7fffe10178a0: neg %rdx ## __ negptr(rdx);
    0x7fffe10178a3: add %r13,%rdx ## __ addptr(rdx, rbcp); // branch bcp
0x7fffe10178a6: call 0x7fffe10178b0 ## from 0x7fffe10178a6 to 0x7fffe10179d3 is the call_VM sequence
    ## __ call_VM(noreg,
    ## CAST_FROM_FN_PTR(address,
    ## InterpreterRuntime::frequency_counter_overflow),
    ## rdx);
    0x7fffe10178ab: jmp 0x7fffe10179d4
    0x7fffe10178b0: mov %rdx,%rsi
    0x7fffe10178b3: lea 0x8(%rsp),%rax
    0x7fffe10178b8: mov %r13,-0x40(%rbp)
    0x7fffe10178bc: cmpq $0x0,-0x10(%rbp)
    0x7fffe10178c4: je 0x7fffe10178de
    0x7fffe10178ca: movabs $0x7ffff71becc8,%rdi
    0x7fffe10178d4: and $0xfffffffffffffff0,%rsp
    0x7fffe10178d8: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe10178dd: hlt
    0x7fffe10178de: push %r10
    0x7fffe10178e0: cmp 0x16aef619(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
    0x7fffe10178e7: je 0x7fffe1017901
    0x7fffe10178ed: movabs $0x7ffff7311c28,%rdi
    0x7fffe10178f7: and $0xfffffffffffffff0,%rsp
    0x7fffe10178fb: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017900: hlt
    0x7fffe1017901: pop %r10
    0x7fffe1017903: mov %r15,%rdi
    0x7fffe1017906: vzeroupper
    0x7fffe1017909: mov %rbp,0x2d0(%r15)
    0x7fffe1017910: mov %rax,0x2c0(%r15)
    0x7fffe1017917: test $0xf,%esp
    0x7fffe101791d: je 0x7fffe1017935
    0x7fffe1017923: sub $0x8,%rsp
    0x7fffe1017927: call 0x7ffff65d3eb4 <_ZN18InterpreterRuntime26frequency_counter_overflowEP10JavaThreadPh>
    0x7fffe101792c: add $0x8,%rsp
    0x7fffe1017930: jmp 0x7fffe101793a
    0x7fffe1017935: call 0x7ffff65d3eb4 <_ZN18InterpreterRuntime26frequency_counter_overflowEP10JavaThreadPh>
    0x7fffe101793a: push %rax
    0x7fffe101793b: push %rdi
    0x7fffe101793c: push %rsi
    0x7fffe101793d: push %rdx
    0x7fffe101793e: push %rcx
    0x7fffe101793f: push %r8
    0x7fffe1017941: push %r9
    0x7fffe1017943: push %r10
    0x7fffe1017945: push %r11
    0x7fffe1017947: test $0xf,%esp
    0x7fffe101794d: je 0x7fffe1017965
    0x7fffe1017953: sub $0x8,%rsp
    0x7fffe1017957: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe101795c: add $0x8,%rsp
    0x7fffe1017960: jmp 0x7fffe101796a
    0x7fffe1017965: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe101796a: pop %r11
    0x7fffe101796c: pop %r10
    0x7fffe101796e: pop %r9
    0x7fffe1017970: pop %r8
    0x7fffe1017972: pop %rcx
    0x7fffe1017973: pop %rdx
    0x7fffe1017974: pop %rsi
    0x7fffe1017975: pop %rdi
    0x7fffe1017976: cmp %rax,%r15
    0x7fffe1017979: je 0x7fffe1017993
    0x7fffe101797f: movabs $0x7ffff7311da0,%rdi
    0x7fffe1017989: and $0xfffffffffffffff0,%rsp
    0x7fffe101798d: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017992: hlt
    0x7fffe1017993: pop %rax
    0x7fffe1017994: movq $0x0,0x2c0(%r15)
    0x7fffe101799f: movq $0x0,0x2d0(%r15)
    0x7fffe10179aa: movq $0x0,0x2c8(%r15)
    0x7fffe10179b5: vzeroupper
    0x7fffe10179b8: cmpq $0x0,0x8(%r15)
    0x7fffe10179c0: je 0x7fffe10179cb
    0x7fffe10179c6: jmp 0x7fffe1000c20
    0x7fffe10179cb: mov -0x40(%rbp),%r13
    0x7fffe10179cf: mov -0x38(%rbp),%r14
    0x7fffe10179d3: ret
    ## // rax: osr nmethod (osr ok) or NULL (osr not possible)
    ## // rdx: scratch
    ## // r14: locals pointer
    ## // r13: bcp
    0x7fffe10179d4: test %rax,%rax ## __ testptr(rax, rax); // test result
    0x7fffe10179d7: je 0x7fffe1017877 ## __ jcc(Assembler::zero, dispatch); // no osr if null
    0x7fffe10179dd: cmpb $0x0,0x14b(%rax) ## // nmethod may have been invalidated (VM may block upon call_VM return) __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
    0x7fffe10179e4: jne 0x7fffe1017877 ## __ jcc(Assembler::notEqual, dispatch);
    ### // We have the address of an on stack replacement routine in rax.
    ### // In preparation of invoking it, first we must migrate the locals
    ### // and monitors from off the interpreter frame on the stack.
    ### // Ensure to save the osr nmethod over the migration call,
    ### // it will be preserved in rbx.
    0x7fffe10179ea: mov %rax,%rbx ### __ mov(rbx, rax);

0x7fffe10179ed: call 0x7fffe10179f7 ### from 0x7fffe10179ed to 0x7fffe1017b17 is call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
    0x7fffe10179f2: jmp 0x7fffe1017b18
    0x7fffe10179f7: lea 0x8(%rsp),%rax
    0x7fffe10179fc: mov %r13,-0x40(%rbp)
    0x7fffe1017a00: cmpq $0x0,-0x10(%rbp)
    0x7fffe1017a08: je 0x7fffe1017a22
    0x7fffe1017a0e: movabs $0x7ffff71becc8,%rdi
    0x7fffe1017a18: and $0xfffffffffffffff0,%rsp
    0x7fffe1017a1c: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017a21: hlt
    0x7fffe1017a22: push %r10
    0x7fffe1017a24: cmp 0x16aef4d5(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
    0x7fffe1017a2b: je 0x7fffe1017a45
    0x7fffe1017a31: movabs $0x7ffff7311c28,%rdi
    0x7fffe1017a3b: and $0xfffffffffffffff0,%rsp
    0x7fffe1017a3f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017a44: hlt
    0x7fffe1017a45: pop %r10
    0x7fffe1017a47: mov %r15,%rdi
    0x7fffe1017a4a: vzeroupper
    0x7fffe1017a4d: mov %rbp,0x2d0(%r15)
    0x7fffe1017a54: mov %rax,0x2c0(%r15)
    0x7fffe1017a5b: test $0xf,%esp
    0x7fffe1017a61: je 0x7fffe1017a79
    0x7fffe1017a67: sub $0x8,%rsp
    0x7fffe1017a6b: call 0x7ffff6bcdb22 <_ZN13SharedRuntime19OSR_migration_beginEP10JavaThread>
    0x7fffe1017a70: add $0x8,%rsp
    0x7fffe1017a74: jmp 0x7fffe1017a7e
    0x7fffe1017a79: call 0x7ffff6bcdb22 <_ZN13SharedRuntime19OSR_migration_beginEP10JavaThread>
    0x7fffe1017a7e: push %rax
    0x7fffe1017a7f: push %rdi
    0x7fffe1017a80: push %rsi
    0x7fffe1017a81: push %rdx
    0x7fffe1017a82: push %rcx
    0x7fffe1017a83: push %r8
    0x7fffe1017a85: push %r9
    0x7fffe1017a87: push %r10
    0x7fffe1017a89: push %r11
    0x7fffe1017a8b: test $0xf,%esp
    0x7fffe1017a91: je 0x7fffe1017aa9
    0x7fffe1017a97: sub $0x8,%rsp
    0x7fffe1017a9b: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe1017aa0: add $0x8,%rsp
    0x7fffe1017aa4: jmp 0x7fffe1017aae
    0x7fffe1017aa9: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe1017aae: pop %r11
    0x7fffe1017ab0: pop %r10
    0x7fffe1017ab2: pop %r9
    0x7fffe1017ab4: pop %r8
    0x7fffe1017ab6: pop %rcx
    0x7fffe1017ab7: pop %rdx
    0x7fffe1017ab8: pop %rsi
    0x7fffe1017ab9: pop %rdi
    0x7fffe1017aba: cmp %rax,%r15
    0x7fffe1017abd: je 0x7fffe1017ad7
    0x7fffe1017ac3: movabs $0x7ffff7311da0,%rdi
    0x7fffe1017acd: and $0xfffffffffffffff0,%rsp
    0x7fffe1017ad1: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1017ad6: hlt
    0x7fffe1017ad7: pop %rax
    0x7fffe1017ad8: movq $0x0,0x2c0(%r15)
    0x7fffe1017ae3: movq $0x0,0x2d0(%r15)
    0x7fffe1017aee: movq $0x0,0x2c8(%r15)
    0x7fffe1017af9: vzeroupper
    0x7fffe1017afc: cmpq $0x0,0x8(%r15)
    0x7fffe1017b04: je 0x7fffe1017b0f
    0x7fffe1017b0a: jmp 0x7fffe1000c20
    0x7fffe1017b0f: mov -0x40(%rbp),%r13
    0x7fffe1017b13: mov -0x38(%rbp),%r14
    0x7fffe1017b17: ret
    0x7fffe1017b18: mov %rax,%rsi ## LP64_ONLY(__ mov(j_rarg0, rax));
    0x7fffe1017b1b: mov -0x8(%rbp),%rdx ## __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
    0x7fffe1017b1f: leave ## __ leave(); // remove frame anchor
    0x7fffe1017b20: pop %rcx ## __ pop(retaddr); // get return address
    0x7fffe1017b21: mov %rdx,%rsp ## __ mov(rsp, sender_sp); // set sp to sender sp
    0x7fffe1017b24: and $0xfffffffffffffff0,%rsp ## // Ensure compiled code always sees stack at proper alignment __ andptr(rsp, -(StackAlignmentInBytes));
    0x7fffe1017b28: push %rcx ## // push the return address __ push(retaddr);
    0x7fffe1017b29: jmp *0xf8(%rbx) ## __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
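The template above is what the interpreter runs for branch bytecodes such as ifeq/goto. As a rough illustration (my own example, not from the post), a simple counted loop like the one below compiles to a conditional branch plus a backward goto; every backward branch bumps the backedge counter seen above, and once it overflows the InterpreterRuntime::frequency_counter_overflow / OSR path is taken.

public class LoopDemo {
    public static void main(String[] args) {
        long sum = 0;
        // the loop back-edge is a backward branch in the bytecode (a goto back to the
        // loop header); the interpreter increments the backedge counter on it, which
        // can eventually trigger on-stack replacement (OSR)
        for (int i = 0; i < 1_000_000; i++) {
            sum += i;
        }
        System.out.println(sum);
    }
}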

Full stack trace

    (gdb) bt
    #0 TemplateTable::branch (is_jsr=false, is_wide=false)
    at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2188
    #1 0x00007ffff6d74ce0 in TemplateTable::if_0cmp (cc=TemplateTable::equal)
    at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2302
    #2 0x00007ffff6d66161 in Template::generate (
    this=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>,
    masm=0x7ffff0029588)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateTable.cpp:62
    #3 0x00007ffff6d59a3c in TemplateInterpreterGenerator::generate_and_dispatch (
    this=0x7ffff59fea10,
    t=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>, tos_out=vtos)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:380
    #4 0x00007ffff6d59572 in TemplateInterpreterGenerator::set_short_entry_points
    (this=0x7ffff59fea10,
    t=0x7ffff7bd8ce0 <TemplateTable::_template_table+4896>,
    bep=@0x7ffff59fe398: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    cep=@0x7ffff59fe3a0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    sep=@0x7ffff59fe3a8: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    aep=@0x7ffff59fe3b0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    iep=@0x7ffff59fe3b8: 0x7fffe1017627 "PSQRH\213M\330H\205\311\017\204g",
    lep=@0x7ffff59fe3c0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    fep=@0x7ffff59fe3c8: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    --Type <RET> for more, q to quit, c to continue without paging--
    dep=@0x7ffff59fe3d0: 0x7fffe1008f14 "H\277h\260N\367\377\177",
    vep=@0x7ffff59fe3d8: 0x7fffe1017620 "\213\004$H\203\304\bPSQRH\213M\330H\205\311\017\204g")
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:344
    #5 0x00007ffff6d590ec in TemplateInterpreterGenerator::set_entry_points (
    this=0x7ffff59fea10, code=Bytecodes::_ifeq)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:313
    #6 0x00007ffff6d58d4a in TemplateInterpreterGenerator::set_entry_points_for_all_bytes (this=0x7ffff59fea10)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:269
    #7 0x00007ffff6d5893a in TemplateInterpreterGenerator::generate_all (
    this=0x7ffff59fea10)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:227
    #8 0x00007ffff6d57259 in TemplateInterpreterGenerator::TemplateInterpreterGenerator (this=0x7ffff59fea10, _code=0x7ffff00febe0)
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp:40
    #9 0x00007ffff6d55de4 in TemplateInterpreter::initialize_code ()
    at /home/dai/jdk/src/hotspot/share/interpreter/templateInterpreter.cpp:62
    --Type <RET> for more, q to quit, c to continue without paging--
    #10 0x00007ffff65cc48d in interpreter_init_code ()
    at /home/dai/jdk/src/hotspot/share/interpreter/interpreter.cpp:137
    #11 0x00007ffff65a6d94 in init_globals ()
    at /home/dai/jdk/src/hotspot/share/runtime/init.cpp:134
    #12 0x00007ffff6d8d1ca in Threads::create_vm (args=0x7ffff59fed50,
    canTryAgain=0x7ffff59fec5b)
    at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2843
    #13 0x00007ffff66b243b in JNI_CreateJavaVM_inner (vm=0x7ffff59feda8,
    penv=0x7ffff59fedb0, args=0x7ffff59fed50)
    at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3613
    #14 0x00007ffff66b2787 in JNI_CreateJavaVM (vm=0x7ffff59feda8,
    penv=0x7ffff59fedb0, args=0x7ffff59fed50)
    at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3701
    #15 0x00007ffff7faca6a in InitializeJVM (pvm=0x7ffff59feda8,
    penv=0x7ffff59fedb0, ifn=0x7ffff59fee00)
    at /home/dai/jdk/src/java.base/share/native/libjli/java.c:1459
    #16 0x00007ffff7fa95ec in JavaMain (_args=0x7fffffffa9a0)
    at /home/dai/jdk/src/java.base/share/native/libjli/java.c:411
    #17 0x00007ffff7fb05ec in ThreadJavaMain (args=0x7fffffffa9a0)
    at /home/dai/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #18 0x00007ffff7c94b43 in start_thread (arg=<optimized out>)
    at ./nptl/pthread_create.c:442
    #19 0x00007ffff7d26a00 in clone3 ()
    --Type <RET> for more, q to quit, c to continue without paging--
    at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
    (gdb) info breakpoints
    Num Type Disp Enb Address What
    1 breakpoint keep y 0x00007ffff6d73852 in TemplateTable::branch(bool, bool) at /home/dai/jdk/src/hotspot/cpu/x86/templateTable_x86.cpp:2122
    breakpoint already hit 1 time
    (gdb) p _masm->_code_section->_end


Related reading


Background

Understand how the JDK catches exceptions (the signal-handling path).


Stack trace

    (gdb) bt
    #0 PosixSignals::pd_hotspot_signal_handler (sig=sig@entry=11, info=info@entry=0x7ffff7bfd330, uc=uc@entry=0x7ffff7bfd200, thread=0x7ffff00295a0) at /home/ubuntu/jdk/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp:201
    #1 0x00007ffff7090f7d in JVM_handle_linux_signal (abort_if_unrecognized=1, ucVoid=0x7ffff7bfd200, info=0x7ffff7bfd330, sig=11) at /home/ubuntu/jdk/src/hotspot/os/posix/signals_posix.cpp:656
    #2 JVM_handle_linux_signal (sig=11, info=0x7ffff7bfd330, ucVoid=0x7ffff7bfd200, abort_if_unrecognized=1) at /home/ubuntu/jdk/src/hotspot/os/posix/signals_posix.cpp:557
    #3 <signal handler called>
    #4 0x00007fffe8537640 in ?? ()
    #5 0x0000000000000246 in ?? ()
    #6 0x00007fffe8537734 in ?? ()
    #7 0x00007ffff79f1858 in ?? () from /home/ubuntu/jdk/build/linux-x86_64-server-fastdebug/jdk/lib/server/libjvm.so
    #8 0x00007ffff7bfe290 in ?? ()
    #9 0x00007ffff734777a in VM_Version::get_processor_features () at /home/ubuntu/jdk/src/hotspot/cpu/x86/vm_version_x86.cpp:803

The handler returns true here, so the JVM does not abort; execution continues at the stub instead.

    bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
    ucontext_t* uc, JavaThread* thread) {

    /*
    NOTE: does not seem to work on linux.
    if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
    } else {
    assert(sig == info->si_signo, "bad siginfo");
    }
    */
    // decide if this trap can be handled by a stub
    address stub = NULL;

    address pc = NULL;

    //%note os_trap_1
    if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Posix::ucontext_get_pc(uc);

    if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
    // An irrecoverable SI_KERNEL SIGSEGV has occurred.
    // It's likely caused by dereferencing an address larger than TASK_SIZE.
    return false;
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
    address addr = (address) info->si_addr;

    // check if fault address is within thread stack
    if (thread->is_in_full_stack(addr)) {
    // stack overflow
    if (os::Posix::handle_stack_overflow(thread, addr, pc, uc, &stub)) {
    return true; // continue
    }
    }
    }

    if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    stub = VM_Version::cpuinfo_cont_addr();
    }

    if (thread->thread_state() == _thread_in_Java) {
    // Java thread running in Java code => find exception handler if any
    // a fault inside compiled code, the interpreter, or a stub

    if (sig == SIGSEGV && SafepointMechanism::is_poll_address((address)info->si_addr)) {
    stub = SharedRuntime::get_poll_stub(pc);
    } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
    // BugId 4454115: A read from a MappedByteBuffer can fault
    // here if the underlying file has been truncated.
    // Do not crash the VM in such a case.
    CodeBlob* cb = CodeCache::find_blob(pc);
    CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
    bool is_unsafe_arraycopy = thread->doing_unsafe_access() && UnsafeCopyMemory::contains_pc(pc);
    if ((nm != NULL && nm->has_unsafe_access()) || is_unsafe_arraycopy) {
    address next_pc = Assembler::locate_next_instruction(pc);
    if (is_unsafe_arraycopy) {
    next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
    }
    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }
    }
    else

    #ifdef AMD64
    if (sig == SIGFPE &&
    (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
    stub =
    SharedRuntime::
    continuation_for_implicit_exception(thread,
    pc,
    SharedRuntime::
    IMPLICIT_DIVIDE_BY_ZERO);
    #else
    if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
    // HACK: si_code does not work on linux 2.2.12-20!!!
    int op = pc[0];
    if (op == 0xDB) {
    // FIST
    // TODO: The encoding of D2I in x86_32.ad can cause an exception
    // prior to the fist instruction if there was an invalid operation
    // pending. We want to dismiss that exception. From the win_32
    // side it also seems that if it really was the fist causing
    // the exception that we do the d2i by hand with different
    // rounding. Seems kind of weird.
    // NOTE: that we take the exception at the NEXT floating point instruction.
    assert(pc[0] == 0xDB, "not a FIST opcode");
    assert(pc[1] == 0x14, "not a FIST opcode");
    assert(pc[2] == 0x24, "not a FIST opcode");
    return true;
    } else if (op == 0xF7) {
    // IDIV
    stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
    } else {
    // TODO: handle more cases if we are using other x86 instructions
    // that can generate SIGFPE signal on linux.
    tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
    fatal("please update this code.");
    }
    #endif // AMD64
    } else if (sig == SIGSEGV &&
    MacroAssembler::uses_implicit_null_check(info->si_addr)) {
    // Determination of interpreter/vtable stub/compiled code null exception
    stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
    }
    } else if ((thread->thread_state() == _thread_in_vm ||
    thread->thread_state() == _thread_in_native) &&
    (sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
    thread->doing_unsafe_access())) {
    address next_pc = Assembler::locate_next_instruction(pc);
    if (UnsafeCopyMemory::contains_pc(pc)) {
    next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
    }
    stub = SharedRuntime::handle_unsafe_access(thread, next_pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
    stub = addr;
    }
    }
    }

    #ifndef AMD64
    // Execution protection violation
    //
    // This should be kept as the last step in the triage. We don't
    // have a dedicated trap number for a no-execute fault, so be
    // conservative and allow other handlers the first shot.
    //
    // Note: We don't test that info->si_code == SEGV_ACCERR here.
    // this si_code is so generic that it is almost meaningless; and
    // the si_code for this condition may change in the future.
    // Furthermore, a false-positive should be harmless.
    if (UnguardOnExecutionViolation > 0 &&
    stub == NULL &&
    (sig == SIGSEGV || sig == SIGBUS) &&
    uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Posix::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
    (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
    (align_down((intptr_t) pc ^ (intptr_t) addr,
    (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
    static volatile address last_addr =
    (address) os::non_memory_address_word();

    // In conservative mode, don't unguard unless the address is in the VM
    if (addr != last_addr &&
    (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

    // Set memory to RWX and retry
    address page_start = align_down(addr, page_size);
    bool res = os::protect_memory((char*) page_start, page_size,
    os::MEM_PROT_RWX);

    log_debug(os)("Execution protection violation "
    "at " INTPTR_FORMAT
    ", unguarding " INTPTR_FORMAT ": %s, errno=%d", p2i(addr),
    p2i(page_start), (res ? "success" : "failed"), errno);
    stub = pc;

    // Set last_addr so if we fault again at the same address, we don't end
    // up in an endless loop.
    //
    // There are two potential complications here. Two threads trapping at
    // the same address at the same time could cause one of the threads to
    // think it already unguarded, and abort the VM. Likely very rare.
    //
    // The other race involves two threads alternately trapping at
    // different addresses and failing to unguard the page, resulting in
    // an endless loop. This condition is probably even more unlikely than
    // the first.
    //
    // Although both cases could be avoided by using locks or thread local
    // last_addr, these solutions are unnecessary complication: this
    // handler is a best-effort safety net, not a complete solution. It is
    // disabled by default and should only be used as a workaround in case
    // we missed any no-execute-unsafe VM code.

    last_addr = addr;
    }
    }
    }
    #endif // !AMD64

    if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Posix::ucontext_set_pc(uc, stub);
return true; ///////////////////////////////////////// this is where true is returned
    }

    return false;
    }
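One place this handler matters in practice is HotSpot's implicit null checks: for JIT-compiled code the explicit null check is often omitted, a null dereference simply faults with SIGSEGV, and the MacroAssembler::uses_implicit_null_check branch above converts it into a NullPointerException via continuation_for_implicit_exception. A trivial Java example (my own illustration, not from the post):

public class NpeDemo {
    static int len(String s) {
        // faults if s == null; in compiled code this can surface as a SIGSEGV
        // that the signal handler above turns into a NullPointerException
        return s.length();
    }

    public static void main(String[] args) {
        try {
            System.out.println(len(null));
        } catch (NullPointerException e) {
            System.out.println("caught: " + e);
        }
    }
}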

Related reading


Background

When comparing two boxed numbers: Long.equals compares the values, not the objects' addresses.

public boolean equals(Object obj) {
    if (obj instanceof Long) {
        return this.value == (Long)obj;
    } else {
        return false;
    }
}

Why?

Because binary numeric promotion is involved.

JLS documentation

Relevant excerpts:

    // "==" 操作符 的jls 文档

    15.21.1 Numerical Equality Operators == and !=
    If the operands of an equality operator are both of numeric type, or one is of
    numeric type and the other is convertible (§5.1.8) to numeric type, binary numeric
    promotion is performed on the operands (§5.6.2).
// JLS text for Binary Numeric Promotion

    5.6.2 Binary Numeric Promotion
    When an operator applies binary numeric promotion to a pair of operands, each
    of which must denote a value that is convertible to a numeric type, the following
    rules apply, in order:
    1. If any operand is of a reference type, it is subjected to unboxing conversion
// == is one of the operators that trigger Binary Numeric Promotion
    Binary numeric promotion is performed on the operands of certain operators:
    • The multiplicative operators *, /, and % (§15.17)
    • The addition and subtraction operators for numeric types + and - (§15.18.2)
    • The numerical comparison operators <, <=, >, and >= (§15.20.1)
    • The numerical equality operators == and != (§15.21.1)

So:

@Test
public void testEq(){
    Long i = new Long(1000L);
    Long j = new Long(1000L);
    Assert.assertFalse(i == j); // both operands are boxed objects, so the Binary Numeric Promotion conditions are not met; nothing is unboxed and == compares object identity

    Assert.assertTrue(i == 1000L); // here Binary Numeric Promotion applies, so the value is compared
}
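As a follow-up (my own example in the same style), equals compares the wrapped value, so it behaves differently from == on two boxed objects:

@Test
public void testEquals() {
    Long i = Long.valueOf(1000L);
    Long j = Long.valueOf(1000L);
    Assert.assertTrue(i.equals(j));  // equals compares the long value, so this is true
    Assert.assertFalse(i == j);      // == on two boxed objects compares identity, so this is false
}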

Background

I built a static VitePress site on Cloudflare; the page content was rendered twice, and Chrome DevTools showed the error: Hydration completed but contains mismatches


Solution

In Cloudflare's Auto Minify settings, uncheck the CSS/JS/HTML options.


Introduction to the main function

Java's main function is the program's entry point and normally has this signature:

    public static void main(String[] argv){

    }

So how is this main function located and invoked?


Where it is called from:

jni_invoke_static

    (gdb) p method._value->print()
    {method}
    - this oop: 0x00007fffb44112d8
    - method holder: 'Hello'
    - constants: 0x00007fffb4411030 constant pool [34] {0x00007fffb4411030} for 'Hello' cache=0x00007fffb44113e0
    - access: 0x9 public static
    - name: 'main'
    - signature: '([Ljava/lang/String;)V'
    - max stack: 3
    - max locals: 1
    - size of params: 1
    - method size: 13
    - vtable index: -2
- i2i entry: 0x00007fffe100dc00 /////////// entry point
    - adapters: AHE@0x00007ffff01015d0: 0xb i2c: 0x00007fffe1114d60 c2i: 0x00007fffe1114e1a c2iUV: 0x00007fffe1114de4 c2iNCI: 0x00007fffe1114e57
    - compiled entry 0x00007fffe1114e1a
    - code size: 13
- code start: 0x00007fffb44112c0 // start of the bytecode
- code end (excl): 0x00007fffb44112cd // end of the bytecode
    - checked ex length: 0
    - linenumber start: 0x00007fffb44112cd
    - localvar length: 0
    $7 = void

    (gdb) info registers 
    rax 0x7ffff59fe940 140737314285888
    rbx 0x7fffe1000c9e 140736968264862
    rcx 0x7fffb44112d8 140736217551576
    rdx 0xa 10
    rsi 0x7ffff59febf8 140737314286584
    rdi 0x7ffff59fe940 140737314285888
    rbp 0x7ffff59fe870 0x7ffff59fe870
    rsp 0x7ffff59fe810 0x7ffff59fe810
r8 0x7fffe100dc00 140736968317952 // this is the entry point
    r9 0x7ffff59feaf0 140737314286320
    r10 0x7ffff053ae20 140737225403936
    r11 0x7ffff0000090 140737219920016
    r12 0x1 1
    r13 0x0 0
    r14 0x7ffff7c94850 140737350551632
    r15 0x7fffffffa800 140737488332800
    rip 0x7fffe1000ca6 0x7fffe1000ca6
    eflags 0x202 [ IF ]
    cs 0x33 51
    ss 0x2b 43
    ds 0x0 0
    es 0x0 0
    fs 0x0 0
    gs 0x0 0
    $2 = void
    (gdb) where
    #0 JavaCalls::call_helper (result=0x7ffff7bfec10, method=..., args=0x7ffff7bfeb30, __the_thread__=0x7ffff00295a0) at /home/ubuntu/jdk/src/hotspot/share/runtime/javaCalls.cpp:333
    #1 0x00007ffff6799785 in jni_invoke_static (result=result@entry=0x7ffff7bfec10, method_id=method_id@entry=0x7ffff02c84d0, args=args@entry=0x7ffff7bfec80, __the_thread__=__the_thread__@entry=0x7ffff00295a0, env=0x7ffff00298d0,
    call_type=JNI_STATIC, receiver=0x0) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:889
    #2 0x00007ffff679cd19 in jni_CallStaticVoidMethod (env=0x7ffff00298d0, cls=<optimized out>, methodID=0x7ffff02c84d0) at /home/ubuntu/jdk/src/hotspot/share/prims/jni.cpp:1713
    #3 0x00007ffff7fadcb5 in JavaMain (_args=<optimized out>) at /home/ubuntu/jdk/src/java.base/share/native/libjli/java.c:547
    #4 0x00007ffff7fb0f4d in ThreadJavaMain (args=<optimized out>) at /home/ubuntu/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #5 0x00007ffff7c94b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #6 0x00007ffff7d26a00 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81
(gdb)

How C++ calls a Java static method

generate_call_stub generates the call stub that serves as the entry; the stub then calls the method's entry_point.

    address StubGenerator::generate_call_stub(address& return_address) {

    assert((int)frame::entry_frame_after_call_words == -(int)rsp_after_call_off + 1 &&
    (int)frame::entry_frame_call_wrapper_offset == (int)call_wrapper_off,
    "adjust this code");
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // same as in generate_catch_exception()!
    const Address rsp_after_call(rbp, rsp_after_call_off * wordSize);

    const Address call_wrapper (rbp, call_wrapper_off * wordSize);
    const Address result (rbp, result_off * wordSize);
    const Address result_type (rbp, result_type_off * wordSize);
    const Address method (rbp, method_off * wordSize);
    const Address entry_point (rbp, entry_point_off * wordSize);
    const Address parameters (rbp, parameters_off * wordSize);
    const Address parameter_size(rbp, parameter_size_off * wordSize);

    // same as in generate_catch_exception()!
    const Address thread (rbp, thread_off * wordSize);

    const Address r15_save(rbp, r15_off * wordSize);
    const Address r14_save(rbp, r14_off * wordSize);
    const Address r13_save(rbp, r13_off * wordSize);
    const Address r12_save(rbp, r12_off * wordSize);
    const Address rbx_save(rbp, rbx_off * wordSize);

    // stub code
    __ enter();
    __ subptr(rsp, -rsp_after_call_off * wordSize);

    // save register parameters
    #ifndef _WIN64
    __ movptr(parameters, c_rarg5); // parameters
    __ movptr(entry_point, c_rarg4); // entry_point
    #endif

    __ movptr(method, c_rarg3); // method
    __ movl(result_type, c_rarg2); // result type
    __ movptr(result, c_rarg1); // result
    __ movptr(call_wrapper, c_rarg0); // call wrapper

    // save regs belonging to calling function
    __ movptr(rbx_save, rbx);
    __ movptr(r12_save, r12);
    __ movptr(r13_save, r13);
    __ movptr(r14_save, r14);
    __ movptr(r15_save, r15);

    #ifdef _WIN64
    int last_reg = 15;
    if (UseAVX > 2) {
    last_reg = 31;
    }
    if (VM_Version::supports_evex()) {
    for (int i = xmm_save_first; i <= last_reg; i++) {
    __ vextractf32x4(xmm_save(i), as_XMMRegister(i), 0);
    }
    } else {
    for (int i = xmm_save_first; i <= last_reg; i++) {
    __ movdqu(xmm_save(i), as_XMMRegister(i));
    }
    }

    const Address rdi_save(rbp, rdi_off * wordSize);
    const Address rsi_save(rbp, rsi_off * wordSize);

    __ movptr(rsi_save, rsi);
    __ movptr(rdi_save, rdi);
    #else
    const Address mxcsr_save(rbp, mxcsr_off * wordSize);
    {
    Label skip_ldmx;
    __ stmxcsr(mxcsr_save);
    __ movl(rax, mxcsr_save);
    __ andl(rax, 0xFFC0); // Mask out any pending exceptions (only check control and mask bits)
    ExternalAddress mxcsr_std(StubRoutines::x86::addr_mxcsr_std());
    __ cmp32(rax, mxcsr_std, rscratch1);
    __ jcc(Assembler::equal, skip_ldmx);
    __ ldmxcsr(mxcsr_std, rscratch1);
    __ bind(skip_ldmx);
    }
    #endif

    // Load up thread register
    __ movptr(r15_thread, thread);
    __ reinit_heapbase();

    #ifdef ASSERT
    // make sure we have no pending exceptions
    {
    Label L;
    __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ stop("StubRoutines::call_stub: entered with pending exception");
    __ bind(L);
    }
    #endif

    // pass parameters if any
    BLOCK_COMMENT("pass parameters if any");
    Label parameters_done;
    __ movl(c_rarg3, parameter_size);
    __ testl(c_rarg3, c_rarg3);
    __ jcc(Assembler::zero, parameters_done);

    Label loop;
    __ movptr(c_rarg2, parameters); // parameter pointer
    __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
    __ BIND(loop);
    __ movptr(rax, Address(c_rarg2, 0));// get parameter
    __ addptr(c_rarg2, wordSize); // advance to next parameter
    __ decrementl(c_rarg1); // decrement counter
    __ push(rax); // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ BIND(parameters_done);
    __ movptr(rbx, method); // get Method*
    __ movptr(c_rarg1, entry_point); // get entry_point
    __ mov(r13, rsp); // set sender sp
    BLOCK_COMMENT("call Java function");
    __ call(c_rarg1);

    BLOCK_COMMENT("call_stub_return_address:");
    return_address = __ pc();

    // store result depending on type (everything that is not
    // T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movptr(c_rarg0, result);
    Label is_long, is_float, is_double, exit;
    __ movl(c_rarg1, result_type);
    __ cmpl(c_rarg1, T_OBJECT);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(c_rarg1, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(c_rarg1, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(c_rarg0, 0), rax);

    __ BIND(exit);

    // pop parameters
    __ lea(rsp, rsp_after_call);

    #ifdef ASSERT
    // verify that threads correspond
    {
    Label L1, L2, L3;
    __ cmpptr(r15_thread, thread);
    __ jcc(Assembler::equal, L1);
    __ stop("StubRoutines::call_stub: r15_thread is corrupted");
    __ bind(L1);
    __ get_thread(rbx);
    __ cmpptr(r15_thread, thread);
    __ jcc(Assembler::equal, L2);
    __ stop("StubRoutines::call_stub: r15_thread is modified by call");
    __ bind(L2);
    __ cmpptr(r15_thread, rbx);
    __ jcc(Assembler::equal, L3);
    __ stop("StubRoutines::call_stub: threads must correspond");
    __ bind(L3);
    }
    #endif

    __ pop_cont_fastpath();

    // restore regs belonging to calling function
    #ifdef _WIN64
    // emit the restores for xmm regs
    if (VM_Version::supports_evex()) {
    for (int i = xmm_save_first; i <= last_reg; i++) {
    __ vinsertf32x4(as_XMMRegister(i), as_XMMRegister(i), xmm_save(i), 0);
    }
    } else {
    for (int i = xmm_save_first; i <= last_reg; i++) {
    __ movdqu(as_XMMRegister(i), xmm_save(i));
    }
    }
    #endif
    __ movptr(r15, r15_save);
    __ movptr(r14, r14_save);
    __ movptr(r13, r13_save);
    __ movptr(r12, r12_save);
    __ movptr(rbx, rbx_save);

    #ifdef _WIN64
    __ movptr(rdi, rdi_save);
    __ movptr(rsi, rsi_save);
    #else
    __ ldmxcsr(mxcsr_save);
    #endif

    // restore rsp
    __ addptr(rsp, -rsp_after_call_off * wordSize);

    // return
    __ vzeroupper();
    __ pop(rbp);
    __ ret(0);

    // handle return types different from T_INT
    __ BIND(is_long);
    __ movq(Address(c_rarg0, 0), rax);
    __ jmp(exit);

    __ BIND(is_float);
    __ movflt(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    __ BIND(is_double);
    __ movdbl(Address(c_rarg0, 0), xmm0);
    __ jmp(exit);

    return start;
    }

How to list the generated assembly

// list 100 assembly instructions starting at 0x7fffe1000ca6
    x/100i 0x7fffe1000ca6

How to set a gdb breakpoint on an address

    (gdb) b *0x7fffe1000ca6


Method entry point

    JavaCalls::call_helper
    -----> address entry_point = method->from_interpreted_entry();
    ---------> Atomic::load_acquire(&_from_interpreted_entry)

When a method is executed, _from_interpreted_entry is invoked to set up the corresponding stack frame and context. Register r13 then points at the next bytecode; the interpreter reads the next bytecode through r13 and dispatches to the routine (template) that handles it.


So where is _from_interpreted_entry set? It is set in link_method:

void Method::link_method(const methodHandle& h_method, TRAPS) {
    ...
    address entry = Interpreter::entry_for_method(h_method);
    set_interpreter_entry(entry);
    ...
}

Interpreter::entry_for_method(h_method) looks up the entry in the following table:

    AbstractInterpreter::_entry_table  

So where is _entry_table populated?


In the code below:

void TemplateInterpreterGenerator::generate_all(){

#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
}

// all non-native method kinds
method_entry(zerolocals) // this is where AbstractInterpreter::_entry_table[Interpreter::zerolocals] = generate_method_entry(Interpreter::zerolocals) gets set
}

The routine generated here includes setting up the method's stack frame:

    address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
    // determine code generation flags
    bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

    // ebx: Method*
    // rbcp: sender sp
    address entry_point = __ pc();

    const Address constMethod(rbx, Method::const_offset());
    const Address access_flags(rbx, Method::access_flags_offset());
    const Address size_of_parameters(rdx,
    ConstMethod::size_of_parameters_offset());
    const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


    // get parameter size (always needed)
    __ movptr(rdx, constMethod);
    __ load_unsigned_short(rcx, size_of_parameters);

    // rbx: Method*
    // rcx: size of parameters
    // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i )

    __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
    __ subl(rdx, rcx); // rdx = no. of additional locals

    // YYY
    // __ incrementl(rdx);
    // __ andl(rdx, -2);

    // see if we've got enough room on the stack for locals plus overhead.
    generate_stack_overflow_check();

    // get return address
    __ pop(rax);

    // compute beginning of parameters
    __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

    // rdx - # of additional locals
    // allocate space for locals
    // explicitly initialize locals
    {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
    }

    // initialize fixed part of activation frame
    generate_fixed_frame(false);

    // make sure method is not native & not abstract
    #ifdef ASSERT
    __ movl(rax, access_flags);
    {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
    }
    {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
    }
    #endif

    // Since at this point in the method invocation the exception
    // handler would try to exit the monitor of synchronized methods
    // which hasn't been entered yet, we set the thread local variable
    // _do_not_unlock_if_synchronized to true. The remove_activation
    // will check this flag.

    const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));
    const Address do_not_unlock_if_synchronized(thread,
    in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
    __ movbool(do_not_unlock_if_synchronized, true);

    __ profile_parameters_type(rax, rcx, rdx);
    // increment invocation count & check for overflow
    Label invocation_counter_overflow;
    if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow);
    }

    Label continue_after_compile;
    __ bind(continue_after_compile);

    // check for synchronized interpreted methods
    bang_stack_shadow_pages(false);

    // reset the _do_not_unlock_if_synchronized flag
    NOT_LP64(__ get_thread(thread));
    __ movbool(do_not_unlock_if_synchronized, false);

    // check for synchronized methods
    // Must happen AFTER invocation_counter check and stack overflow check,
    // so method is not locked if overflows.
    if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
    } else {
    // no synchronization necessary
    #ifdef ASSERT
    {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    __ stop("method needs synchronization");
    __ bind(L);
    }
    #endif
    }

    // start execution
    #ifdef ASSERT
    {
    Label L;
    const Address monitor_block_top (rbp,
    frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
    }
    #endif

    // jvmti support
    __ notify_method_entry();

    __ dispatch_next(vtos); // the method frame and context are set up; dispatch the next bytecode

    // invocation counter overflow
    if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
    }

    return entry_point;
    }
    Related reading
    Background

    On JDK 11, warnings like the following are printed:
    WARNING: An illegal reflective access operation has occurred
    WARNING: Illegal reflective access by org.codehaus.groovy.reflection.CachedClass (file:/D:/packageFile/org/codehaus/groovy/groovy/2.5.14/groovy-2.5.14.jar) to method java.lang.Object.finalize()
    WARNING: Please consider reporting this to the maintainers of org.codehaus.groovy.reflection.CachedClass
    WARNING: Use --illegal-access=warn to enable warnings of further illegal reflective access operations
    WARNING: All illegal access operations will be denied in a future release

    Background

    ANTLR is a lexer/parser generator, similar to C's bison/yacc.

    Grammar

    Token names always start with a capital letter and so do lexer rules as defined by Java’s Character.isUpperCase method. Parser rule names always start with a lowercase letter (those that fail Character.isUpperCase). The initial character can be followed by uppercase and lowercase letters, digits, and underscores. Here are some sample names:

    ID, LPAREN, RIGHT_CURLY // token names/lexer rules
    expr, simpleDeclarator, d2, header_file // parser rule names
    Names starting with an uppercase letter are token names or lexer rule names;
    names starting with a lowercase letter are parser rule names.

    Related reading
    diff --git a/page/20/index.html b/page/20/index.html

    What is a language?

    inductively defined sets

    An inductively defined set is made up of three parts:

    • 1. an initial set of elements
    • 2. a generation rule
    • 3. a closure clause stating that nothing belongs to the set besides what parts 1 and 2 produce

    Example

    The set of natural numbers

    {0 , 1 , 2 ...}

    This set starts from a single element

    {0}

    plus the generation rule

    suc(i)
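    A worked restatement of the three parts for the natural numbers (my own notation, not from the original post):

    (1) base: $0 \in \mathbb{N}$
    (2) rule: $i \in \mathbb{N} \Rightarrow \mathrm{suc}(i) \in \mathbb{N}$
    (3) closure: $\mathbb{N}$ is the smallest set satisfying (1) and (2)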

    What is the difference between left join and right join?

    I studied communication engineering in college, so some math concepts are still missing and I can only fill them in occasionally.
    I kept looking for a definition of left join and right join, something like an RFC.
    Just as you read C89/C99 for C, for SQL you can read SQL-99; that document describes what a left join is:
    Let XN1 and XN2 be effective distinct names for X1 and X2, respectively. Let TN be an effective
    name for T.
    Case:
    a) If INNER or <cross join> is specified, then let S be the multiset of rows of T.
    b) If LEFT is specified, then let S be the multiset of rows resulting from:
    SELECT FROM T
    UNION ALL
    SELECT FROM X1
    c) If RIGHT is specified, then let S be the multiset of rows resulting from:
    SELECT FROM T
    UNION ALL
    SELECT FROM X2
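    Condensed into relational-algebra style (my own reading of the clause above, where $T = T_1 \bowtie T_2$ is the set of inner-join rows):

    $T_1 \text{ LEFT JOIN } T_2 = T \cup X_1$, where $X_1$ is the rows of $T_1$ with no join partner, padded with NULLs for the columns of $T_2$;
    $T_1 \text{ RIGHT JOIN } T_2 = T \cup X_2$, defined symmetrically for the unmatched rows of $T_2$.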
    Related reading

    basic paxos

    Goal:
    the goal of basic paxos is that multiple replicas end up with at most one chosen value.

    Paxos Made Simple

    There is a paper that walks through the proof and derivation of basic paxos and explains the reasoning behind the prepare phase.

    P1: an acceptor must accept the first proposal that it receives.

    P2: if a proposal with value v is chosen, then every higher-numbered proposal that is chosen has value v.

    P2a: if a proposal with value v is chosen, then every higher-numbered proposal accepted by any acceptor has value v.

    P2b: if a proposal with value v is chosen, then every higher-numbered proposal issued by any proposer has value v.

    P2c: for any v and n, a proposal numbered n with value v may be issued only if there is a majority set S of acceptors such that either (1) no acceptor in S has accepted any proposal numbered less than n, or (2) v is the value of the highest-numbered proposal, numbered less than n, accepted by some acceptor in S.

    P2c is essentially the rule and precondition behind the prepare phase.

    Why each guarantee exists

    P1: so that a value can still be chosen when only a single proposal is made.
    P2: so that multiple chosen proposals all carry the same value (We can allow multiple proposals to be chosen, but we must guarantee that all chosen proposals have the same value).
    P2a: to satisfy P2, we require that once a value is chosen, any proposal an acceptor subsequently accepts has the chosen value.
    P2b: to satisfy P2a, we require that once a value is chosen, any proposal a proposer subsequently issues has the chosen value.
    P2c:

    • major premise:
      assume that some proposal with number m and value
      v is chosen and show that any proposal issued with number n > m also
      has value v
    • minor premise (the induction hypothesis):
      assumption that every proposal issued with a number in m . . (n − 1) has
      value v , where i . . j denotes the set of numbers from i through j
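    P2c stated a bit more formally (my own rendering, writing $\mathrm{acc}_{<n}(a)$ for the proposals numbered below $n$ that acceptor $a$ has accepted):

    $\exists S$ (a majority of acceptors): $\bigl(\forall a \in S:\ \mathrm{acc}_{<n}(a) = \varnothing\bigr) \;\lor\; v = \text{value of the highest-numbered proposal in } \bigcup_{a \in S} \mathrm{acc}_{<n}(a)$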

    Related reading:

    What is two-way binding?
    This question puzzled me for a long time, until I learned about isomorphisms and bijections.

    So is the essence of two-way binding that the view and the data are isomorphic?

    From a certain angle, is this also an application of the Yoneda lemma?


    Haskell has a higher-order function called curry.

    We can use :t identifier to find out the type of an identifier.

    Let's look at it in the Haskell REPL:
    Prelude> :t curry
    curry :: ((a, b) -> c) -> a -> b -> c
    What does this mean?
    I haven't fully understood it yet.

    Lectures on the Curry-Howard Isomorphism [Sørensen & Urzyczyn 2006]
    Derivation and Computation [Simmons 2000]
    Proofs and Types [Girard, Lafont, Taylor 1996]


    What is extensibility?

    Let's look at the typed case first.

    int func(int x, int y) {
        return x + y;
    }

    Here the input is two integers and the output is one integer: x -> y -> z.

    Extensibility needs a type system, or some other formalism, to be described precisely.


    Tape

    state

    The places where I have seen state discussed are books on computability theory or on type systems.

    Expansion

    f(f(A,B),f(D,E))
    f(A,B,D,E);
    circular dependency
    f(f)

    diff --git a/page/21/index.html b/page/21/index.html

    What is a category?

    A category is something made up of objects plus a mapping relation between them.

    What is a functor?

    A functor is made up of two categories plus a mapping between those two categories.

    In other words, a functor is categories plus a mapping between the two categories.

    What is a hom functor?
    // todo

    Set
    Set is the category with
    objects: all sets.
    arrows: given sets X, Y , every (total) set-function f : X -> Y is an
    arrow.
    type and function

    Types are the objects of a category and functions are the arrows between them; is that right?
    It doesn't seem quite right, so it's probably wrong, but there is clearly some hidden connection, even if it isn't literally a category.

    Related reading


    Is there something wrong with my reading comprehension?

    I really don't understand what those requirement documents are saying, or what the docs are saying.

    A document should be something that even an idiot can get the point from.
    Am I dumber than an idiot?


    https://segmentfault.com/a/1190000018331788

    For the computer itself there are no types, no functions, not even OOM; there is nothing at all.

    So where do our programs come from?

    Mappings and equivalence

    https://bartoszmilewski.com/2015/10/28/yoneda-embedding/

    I keep feeling that the various pushdown optimizations in SQL could be described with the Yoneda lemma, or with category theory in general.

    Lately I have been reading some category theory. I have always felt that my logic is poor and that I write bugs all the time. Is my logic really that bad?

    Or is the problem the boundaries I consider?

    What kind of boundary problem is it? Did I fail to describe the problem clearly, or is it something else?

    Or is the requirement, as stated by whoever raised it, the problem?

    Category theory is really remarkable.

    Or rather, mappings are really remarkable.

    If we want to compare two things, we first map them into a set where comparison is defined, and then we can compare them.

    What are generics?

    I have always wanted to know.


    Properties

    Derivation

    State machine

    How is a state machine defined?

    https://zh.wikipedia.org/zh-hans/%E6%9C%89%E9%99%90%E7%8A%B6%E6%80%81%E6%9C%BA

    Acknowledgement (ACK)

    Suppose A wants to send a message to B. Consider the following expression:
    (number: int , message : string)   => ( acknumber: int )
    Its meaning is:

    • Input: a sequence number plus the message associated with that number
    • Return: if B receives it, B sends A's number back unchanged

    Raft election

    Raft's leader election looks much like the basic paxos process; the basic paxos "value" is then the id of the master (leader) node being elected.

    How is the term generated?

    Correctness of the election (how to guarantee that within one term there is at most one winner, or none at all)

    Premises:
    1. an acceptor can vote only once per term
    2. a candidate wins only by obtaining a majority of the votes
    To prove:
    at most one candidate can obtain a majority:
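    A worked version of that argument (my own sketch, not from the original post): suppose candidates $c_1$ and $c_2$ both win term $t$ with vote sets $S_1$ and $S_2$ out of $N$ acceptors. Then

    $|S_1| > N/2$ and $|S_2| > N/2$, so $|S_1| + |S_2| > N$ and $S_1 \cap S_2 \neq \varnothing$,

    i.e. some acceptor voted twice in term $t$, contradicting premise 1. Hence at most one candidate can obtain a majority in a given term.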

    Related reading

    A PHP opcode maps to many handlers.

    How is the handler to use actually chosen?

    Just as a TCP connection is identified by a 5-tuple,

    a PHP opcode handler is identified by a 3-tuple:
    opcode, op1, op2.

    The core function
    ZEND_API void ZEND_FASTCALL zend_vm_set_opcode_handler(zend_op* op)
    {
    zend_uchar opcode = zend_user_opcodes[op->opcode];

    ...
    // What is zend_opcode_handlers? An array of function pointers; each opcode + op1 + op2 combination selects one function pointer
    op->handler = zend_opcode_handlers[zend_vm_get_opcode_handler_idx(zend_spec_handlers[opcode], op)];
    }
    So the core of the core is
    zend_vm_get_opcode_handler_idx(zend_spec_handlers[opcode], op)
    What does it do?
    It essentially computes an offset into the zend_opcode_handlers array of function pointers.
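    A minimal sketch of the idea in C (my own illustration; the names pick_handler, spec_base and the tiny 2x2 layout are made up and are not the real Zend layout):

    #include <stdio.h>

    typedef void (*opcode_handler_t)(void);

    /* illustrative sketch: one specialized handler per (opcode, op1 type, op2 type) combination */
    static void add_const_const(void) { puts("ADD CONST,CONST"); }
    static void add_const_var(void)   { puts("ADD CONST,VAR"); }
    static void add_var_const(void)   { puts("ADD VAR,CONST"); }
    static void add_var_var(void)     { puts("ADD VAR,VAR"); }

    enum { OP_CONST, OP_VAR, OP_TYPE_COUNT };   /* simplified operand kinds */

    /* flat table indexed by the triple: spec_base selects the opcode's block,
       the two operand kinds select the slot inside it */
    static opcode_handler_t handlers[] = {
        add_const_const, add_const_var,
        add_var_const,   add_var_var,
    };

    static opcode_handler_t pick_handler(int spec_base, int op1_type, int op2_type) {
        return handlers[spec_base + op1_type * OP_TYPE_COUNT + op2_type];
    }

    int main(void) {
        pick_handler(0, OP_VAR, OP_CONST)();    /* prints "ADD VAR,CONST" */
        return 0;
    }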

    + + +
    + + + + +
    +
    +
    +
    + + + + + + + +
    + + + + + +
    +

    + + +

    + + +
    + + + + +
    + + +

    Preface

    I have written a lot of SQL against MySQL and read a lot of blog posts, and going around in circles I found that many of them are not very rigorous.
    Many explanations feel like the blind men and the elephant: not exactly wrong, but rather one-sided.

    BNF and inductively defined sets
    select  name from  table where table.file =1;
    Three-valued logic

    Code is a pile of strings.

    Code gets mapped to operations, and that mapping is what compilation is.

    So compilation is a mapping process.

    Many programming languages are actually quite similar. Take PHP and SQL: PHP functions are put into a global function_table hashmap so that they can be called later; the key is the function name and the value is the op_array.

    MySQL's built-in functions are very similar: they are registered into a hashmap where the key is the function name and the value is the corresponding pointer.

    Class loading in Java and PHP is also much alike. Loading a class in Java is essentially a deserialization process that puts it into memory, and PHP's opcache achieves a similar effect to some extent.
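    A minimal sketch of that "name maps to something callable" idea in C (my own illustration; the real function_table in PHP and the MySQL function registry are of course far more involved):

    #include <stdio.h>
    #include <string.h>

    typedef double (*builtin_fn)(double);

    /* a tiny "function table": key is the function name, value is a pointer to the implementation */
    struct entry { const char *name; builtin_fn fn; };

    static double my_abs(double x) { return x < 0 ? -x : x; }
    static double my_sqr(double x) { return x * x; }

    static struct entry function_table[] = {
        { "abs", my_abs },
        { "sqr", my_sqr },
    };

    static builtin_fn lookup(const char *name) {
        for (size_t i = 0; i < sizeof(function_table) / sizeof(function_table[0]); i++)
            if (strcmp(function_table[i].name, name) == 0)
                return function_table[i].fn;
        return NULL;
    }

    int main(void) {
        printf("%f\n", lookup("sqr")(3.0));   /* "calling a function by name", as a parsed script would */
        return 0;
    }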


    I am stupid; I can't do anything and I can't write code.

    I just like something that doesn't exist.

    I find writing code for work disgusting.

    But I like fixing bugs.

    The happiest thing lately is that I submitted two PRs to php-src and they were merged. But I still can't write PHP, I can't write C, and I can't memorize PHP's array family of functions.

    At the end of the day, I am just too weak.

    I actually can't write code, but I can fix bugs.

    Because fixing bugs is manual labor, while writing code is mental labor.

    I really am dumb; I can't do anything except write a pile of bugs.

    I'm going to see whether I can fix some MySQL code.

    No Silver Bullet

    Can I really write code? I probably don't understand anything.

    I really don't understand anything; I'm starting to suspect that all I have done these past years is copy and paste.


    Go's lexing and parsing starts in src\cmd\compile\internal\gc\main.go.

    Core steps

    • parseFiles: lex and parse
    • typecheck: walk the syntax tree and do type checking
    • ssa

    After that the result goes through linking, with the linker/loader ld.
    func Main(archInit func(*Arch)) {
    lines := parseFiles(flag.Args()) // lex and parse
    ...
    typecheckok = true

    // Process top-level declarations in phases.

    // Phase 1: const, type, and names and types of funcs.
    // This will gather all the information about types
    // and methods but doesn't depend on any of it.
    //
    // We also defer type alias declarations until phase 2
    // to avoid cycles like #18640.
    // TODO(gri) Remove this again once we have a fix for #25838.

    // Don't use range--typecheck can add closures to xtop.
    timings.Start("fe", "typecheck", "top1")
    for i := 0; i < len(xtop); i++ {
    n := xtop[i]
    if op := n.Op; op != ODCL && op != OAS && op != OAS2 && (op != ODCLTYPE || !n.Left.Name.Param.Alias) {
    xtop[i] = typecheck(n, ctxStmt)
    }
    }

    // Phase 2: Variable assignments.
    // To check interface assignments, depends on phase 1.

    // Don't use range--typecheck can add closures to xtop.
    timings.Start("fe", "typecheck", "top2")
    for i := 0; i < len(xtop); i++ {
    n := xtop[i]
    if op := n.Op; op == ODCL || op == OAS || op == OAS2 || op == ODCLTYPE && n.Left.Name.Param.Alias {
    xtop[i] = typecheck(n, ctxStmt)
    }
    }

    // Phase 3: Type check function bodies.
    // Don't use range--typecheck can add closures to xtop.
    timings.Start("fe", "typecheck", "func")
    var fcount int64
    for i := 0; i < len(xtop); i++ {
    n := xtop[i]
    if op := n.Op; op == ODCLFUNC || op == OCLOSURE {
    Curfn = n
    decldepth = 1
    saveerrors()
    typecheckslice(Curfn.Nbody.Slice(), ctxStmt)
    checkreturn(Curfn)
    if nerrors != 0 {
    Curfn.Nbody.Set(nil) // type errors; do not compile
    }
    // Now that we've checked whether n terminates,
    // we can eliminate some obviously dead code.
    deadcode(Curfn)
    fcount++
    }
    }
    // With all types checked, it's now safe to verify map keys. One single
    // check past phase 9 isn't sufficient, as we may exit with other errors
    // before then, thus skipping map key errors.
    }
    (gdb) bt
    #0 cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000147200, stk=0xc0001db698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:530
    #1 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000147200, stk=0xc0001db698, bp=0xc0001b8a80, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
    #2 0x0000000000799827 in cmd/go/internal/load.loadImport (pre=0x0, path=..., srcDir=..., parent=0xc000146d80, stk=0xc0001db698, importPos=..., mode=1, ~r7=<optimized out>)
    at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:578
    #3 0x000000000079890a in cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000146d80, stk=0xc0001db698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:531
    #4 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000146d80, stk=0xc0001db698, bp=0xc0001b8700, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
    #5 0x0000000000799827 in cmd/go/internal/load.loadImport (pre=0x0, path=..., srcDir=..., parent=0xc000146900, stk=0xc0001db698, importPos=..., mode=1, ~r7=<optimized out>)
    at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:578
    #6 0x000000000079890a in cmd/go/internal/load.LoadImport (path=..., srcDir=..., parent=0xc000146900, stk=0xc0001cf698, importPos=..., mode=1, ~r6=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:531
    #7 0x000000000079fa1b in cmd/go/internal/load.(*Package).load (p=0xc000146900, stk=0xc0001db698, bp=0xc0001b8380, err=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:1707
    #8 0x00000000007a4c66 in cmd/go/internal/load.GoFilesPackage (gofiles=..., ~r1=<optimized out>) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2230
    #9 0x00000000007a3c54 in cmd/go/internal/load.PackagesAndErrors (patterns=..., ~r1=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2056
    #10 0x00000000007a417d in cmd/go/internal/load.PackagesForBuild (args=..., ~r1=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/load/pkg.go:2123
    #11 0x0000000000842528 in cmd/go/internal/work.runBuild (cmd=<optimized out>, args=...) at /home/dinosaur/newgo/go/src/cmd/go/internal/work/build.go:348
    #12 0x0000000000932219 in main.main () at /home/dinosaur/newgo/go/src/cmd/go/main.go:189

    diff --git a/page/22/index.html b/page/22/index.html

    What is a computer language?

    I feel it is a mathematical system.

    What does compiling to machine code mean?

    It binds actions to the language.

    // lex parse type-system ssa asm elf abi
    keyword   :
    int bool
    for while if
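    A minimal sketch of that keyword table at the lexing stage (my own illustration in C; the names and token codes are made up):

    #include <stdio.h>
    #include <string.h>

    enum token { TOK_INT, TOK_BOOL, TOK_FOR, TOK_WHILE, TOK_IF, TOK_IDENT };

    static const char *keywords[] = { "int", "bool", "for", "while", "if" };

    /* map a lexeme to its token code; unknown names become identifiers */
    static enum token lookup_keyword(const char *lexeme) {
        for (size_t i = 0; i < sizeof(keywords) / sizeof(keywords[0]); i++)
            if (strcmp(keywords[i], lexeme) == 0)
                return (enum token)i;
        return TOK_IDENT;
    }

    int main(void) {
        printf("%d %d\n", lookup_keyword("while"), lookup_keyword("foo"));  /* prints "3 5" */
        return 0;
    }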

    What is the essence of optimization?

    Take SSA: it removes dead code, and reduces runtime computation through constant propagation and constant folding.

    Or SQL's logical optimization: it is essentially predicate pushdown, using transformations to reduce read I/O.

    The usual compilation steps:

    lex: lexical analysis
    parse: syntactic analysis, building the syntax tree
    CFG optimization
    codegen

    Both Go and PHP have SSA optimization; it works on the control-flow graph to do constant propagation, constant folding, and dead-code elimination.

    PHP's SSA optimization lives in opcache, and Go's lives in a similar package.

    structural induction
    CFG

    construct cfg

    ssa

    what is ssa

    A program is defined to be in SSA form if each variable is a target of exactly one assignment
    statement in the program text.

    If every variable in the program is assigned exactly once, the program is in SSA form.

    def-use chain and use-def chain


    Under SSA form, each variable is defined once. Def-use chains?are data structures
    that provide, for the single definition of a variable, the set of all its uses.
    In turn, a use-def chain?, which under SSA consists of a single name, uniquely
    specifies the definition that reaches the use.


    A def-use chain takes a definition (an assignment) as input and returns the set of all uses of that variable.

    A use-def chain is the reverse: the input is a use of a variable and the output is the set of its definitions (assignments). For a program in SSA form each variable is assigned (defined) exactly once, so under SSA this set contains exactly one element.

    A small example of what "each variable is assigned exactly once" looks like is sketched below.
    ssa properties

    What properties does SSA have?

    DG

    JG

    insert φ-function

    construct ssa

    destruct ssa

    Related reading


    I have always been puzzled by so-called extensibility; or put differently, how do we design a rule system, and how do we know where the boundary of its set of rules lies?

    First example: a pipeline

    Every node on the pipeline is a callback; we can add or remove nodes at will. A small sketch of such a callback pipeline follows the next paragraphs.

    A directed acyclic graph
    is equivalent to primitive recursive functions

    The path taken through this rule system is determined by the input, so it is equivalent to a language.

    So a rule system is equivalent to a language, and therefore we can apply equivalences and transformations to it.


    One approach is lock-based.
    I used to misunderstand ACID and the relationship between locks and transactions. It actually works like this:

    1. read(x) and write(x) cannot be reordered
    2. write(x) and write(x) cannot be reordered
    3. write(x) and read(x) cannot be reordered either

    If transactions t1 and t2 are executed strictly as "all of t1, then all of t2", nothing can go wrong; it is just slow, with low concurrency.

    So we use equivalence-preserving reorderings to reduce blocking. We do not lock the whole transaction, only the conflicting parts; the rest is order-independent by that equivalence, so it needs no locks, and concurrency goes up.
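    A tiny worked example (mine, not from the post), with two items $x$ and $y$:

    $S:\; r_1(x)\; w_1(x)\; r_2(y)\; w_2(y)\; r_2(x)\; w_2(x)$

    Every conflicting pair ($w_1(x)/r_2(x)$, $w_1(x)/w_2(x)$, $r_1(x)/w_2(x)$) has $t_1$ before $t_2$, so $S$ is equivalent to the serial order $t_1, t_2$ even though the transactions interleave; only the accesses to $x$ ever need to block each other.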

    What is a type?
    A type describes a particular set.

    What is a struct?

    A struct is essentially a combination of types, in other words a relation.

    For example
    struct {
        int a;
        int b;
    };
    This is essentially the relation R x R. So what is a variable of this struct type? It is an element of that relation.

    What is recursion?
    Recursion is a difference equation; recursion is a fixed point. But I still have to read more about recursion.

    What is extensibility?

    Why is BNF, or a similar rule system, correct? What guarantees that? Does it rest on some theorem from category theory or other mathematics, or does it correspond to some mathematical model?
    I have always been curious how the constraints of a rule system are enforced, because rule systems really are remarkable.


    Preface

    Induction is a very special way of reasoning; it works through a mapping to the natural numbers. (My understanding may not be exactly right.)

    Mathematical induction

    • P(0) is true
    • If P(m) is true then so is P(m + 1), for any natural number m:
      (P(0) ∧ (∀m ∈ ω. P(m) ⇒ P(m + 1))) ⇒ ∀n ∈ ω. P(n)

    Well-ordering

    Every non-empty subset of Q has a least element.

    Examples of induction

    BNF is a typical inductively defined set. Induction has a very special power: a very short expression describes a whole set.

    Recursive definitions of sets

    Example:
    the set of natural numbers:
    P(0) = 0;
    P(N+1) = P(N)+1

    Related reading


    What is compilation?

    A process that transforms one set of states into another set of states.

    What is optimization?

    What is a type? A type is a constraint that picks out a set.

    A type is a set.

    What is implicit conversion?

    It is the compiler automatically mapping a value from one set into another set.

    Because an operation between different types is undefined (it might also be not closed, but mostly it is undefined).

    For example, some SQL predicates are two-valued and some are three-valued, which makes it hard for everyone to be clear about the semantics.
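    A minimal C illustration of the point (my own example): comparing an int with a double is only defined because the compiler first implicitly maps the int into the set of doubles (the "usual arithmetic conversions").

    #include <stdio.h>

    int main(void) {
        int    i = 1;
        double d = 1.5;

        /* i and d live in different sets; before the comparison the compiler
           implicitly converts i to the double 1.0 */
        if (i < d)
            printf("1 < 1.5 after int -> double conversion\n");

        /* the same mechanism can surprise you: -1 is implicitly converted to
           unsigned, so -1 < 1u is false */
        printf("%d\n", -1 < 1u);   /* prints 0 on typical platforms */
        return 0;
    }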

    Isomorphism

    What is an isomorphism? This is the thing I most want to understand; it really is marvelous.

    Syntax and semantics

    Natural-language syntax

    If you have studied English, then "i eat apple" is a subject-verb-object structure. My understanding of natural-language syntax is that it is structure that satisfies certain structural rules (which may be the wrong conclusion).

    The syntax of mathematical logic

    Mathematical logic has a similar kind of syntax.

    Programming-language syntax

    In a programming language, likewise, a specific combination of tokens forms a syntactic structure; for example:

    a = 1 ;   // made of three tokens: token<a> token<=> token<1>, reduced by the parser
    Semantics

    Operational semantics

    Describes the operations that the syntax corresponds to.

    Expressions

    A C++ expression is a sequence, and that sequence has a result value; for example:
    The result of the expression always has type void [1]
    The result of evaluation is void.

    Related reading


    Reflection is a very special API; PHP's reflection is a very special kind of callback.
    (gdb) bt
    #0 zim_reflection_class_hasProperty (execute_data=0x7ffff3a14220, return_value=0x7ffff3a14180) at /home/ubuntu/php-src-php-7.4.1/ext/reflection/php_reflection.c:4186
    #1 0x0000555555af49b2 in ZEND_DO_FCALL_SPEC_RETVAL_USED_HANDLER () at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:1729
    #2 0x0000555555b58295 in execute_ex (ex=0x7ffff3a14020) at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:53588
    #3 0x0000555555b5c32d in zend_execute (op_array=0x7ffff3a61c00, return_value=0x0) at /home/ubuntu/php-src-php-7.4.1/Zend/zend_vm_execute.h:57664
    #4 0x0000555555a80b27 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/ubuntu/php-src-php-7.4.1/Zend/zend.c:1663
    #5 0x00005555559e2bad in php_execute_script (primary_file=0x7fffffffd0e0) at /home/ubuntu/php-src-php-7.4.1/main/main.c:2619
    #6 0x0000555555b5ee34 in do_cli (argc=2, argv=0x55555678ab30) at /home/ubuntu/php-src-php-7.4.1/sapi/cli/php_cli.c:961
    #7 0x0000555555b5ff9e in main (argc=2, argv=0x55555678ab30) at /home/ubuntu/php-src-php-7.4.1/sapi/cli/php_cli.c:1352


    When we use JDBC, PDO, or any other MySQL driver, we often see that an error carries two error codes. For example:
    Error number: 1005; Symbol: ER_CANT_CREATE_TABLE; SQLSTATE: HY000
    that is, 1005 and HY000. Or:
    SQLSTATE[23000]: Integrity constraint violation: 1062 Duplicate entry '34' for key 'PRIMARY',
    that is, 23000 and 1062.

    So how are the two related?
    Error code: This value is numeric. It is MySQL-specific and is not portable to other database systems.


    SQLSTATE value: This value is a five-character string (for example, ‘42S02’). SQLSTATE values are taken from ANSI SQL and ODBC and are more standardized than the numeric error codes.
    Either way, you will see two codes: one is the SQLSTATE and the other is the error code. The difference is that the SQLSTATE is more standardized and portable, while the error code is MySQL-specific.

    Related reading


    Build process

    • Pull the code from GitHub
      git clone https://github.com/ClickHouse/ClickHouse.git
    • Create a build subdirectory
      cd ClickHouse/
      mkdir build
      cmake ..
    • The build requires upgrading to gcc-8 / g++-8:
    GCC version must be at least 8.  For example, if GCC 8 is available under
    gcc-8, g++-8 names, do the following: export CC=gcc-8 CXX=g++-8;
    My OS is Ubuntu, so:
    $ sudo  apt-get install gcc-8 g++-8
    $ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 800 --slave /usr/bin/g++ g++ /usr/bin/g++-8
    update-alternatives: using /usr/bin/gcc-8 to provide /usr/bin/gcc (gcc) in auto mode

    Then check that gcc has been upgraded to 8.3.0:
    $ gcc -v
    Using built-in specs.
    ...
    gcc version 8.3.0 (Ubuntu 8.3.0-6ubuntu1~18.04.1)

    • Delete the CMake caches:
      $ rm -rf   CMakeCache.txt CMakeFiles
    Re-running then gives this error:
      Submodules are not initialized.  Run

      git submodule update --init --recursive

    Then initialize the git submodules:
      $ git submodule update --init --recursive
    Because I use v2ray + proxychains4, I barely managed to download all those packages.
    Then continue:
      cmake -DCMAKE_BUILD_TYPE=Debug  -DCMAKE_INSTALL_PREFIX=/home/ubuntu/click  ..

      ninja all
    Then I found that the build needs plenty of memory and an SSD for the disk, otherwise it is extremely slow.

    Starting it with docker-compose

    Related references

    • Step 1: create the files config.xml and users.xml
    • Step 2: create docker-compose.yml
    version: '3'
    services:
    clickhouse-server:
    image: yandex/clickhouse-server
    container_name: clickhouse-server
    hostname: clickhouse-server
    ports:
    - 8123:8123
    expose:
    - 9000
    - 9009
    volumes:
    - ./config.xml:/etc/clickhouse-server/config.xml
    - ./users.xml:/etc/clickhouse-server/users.xml
    - ./data:/var/lib/clickhouse
    - ./log/clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log
    - ./log/clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log
    • Then bring it up:
    docker-compose up
    Connecting with clickhouse-client
    docker run -it --rm --link clickhouse-server:clickhouse-server --net clickhouse_default yandex/clickhouse-client --host clickhouse-server --user seluser --password 8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92
    Related reading


    In examples of JDBC usage you see calls like Class.forName("xxx"):
    Class.forName("com.mysql.jdbc.Driver")
    What is this for?
    It is roughly equivalent to PHP's class_exists, or to Go's blank import
    import _ "github.com/go-sql-driver/mysql
    It exists just to run the class's static block and trigger initialization.

    Related reading

    diff --git a/page/23/index.html b/page/23/index.html

    Getting sql_mode
    SELECT @@sql_mode;

    thinkphp5's default configuration enables ERRMODE_EXCEPTION:
    PDO::ATTR_CASE              => PDO::CASE_NATURAL,
    PDO::ATTR_ERRMODE => PDO::ERRMODE_EXCEPTION,
    PDO::ATTR_ORACLE_NULLS => PDO::NULL_NATURAL,
    PDO::ATTR_STRINGIFY_FETCHES => false,
    PDO::ATTR_EMULATE_PREPARES => false,
    The PDO implementation

    PDO's pdo_raise_impl_error decides from this setting whether to throw; when PDO::ERRMODE_EXCEPTION is configured, it throws an exception.
    void pdo_raise_impl_error(pdo_dbh_t *dbh, pdo_stmt_t *stmt, const char *sqlstate, const char *supp) /* {{{ */
    {
    ...
    if (dbh && dbh->error_mode != PDO_ERRMODE_EXCEPTION) { // ERRMODE_EXCEPTION not set: raise a warning
    php_error_docref(NULL, E_WARNING, "%s", message);
    } else {
    zval ex, info;
    zend_class_entry *def_ex = php_pdo_get_exception_base(1), *pdo_ex = php_pdo_get_exception();

    object_init_ex(&ex, pdo_ex);

    zend_update_property_string(def_ex, &ex, "message", sizeof("message")-1, message);
    zend_update_property_string(def_ex, &ex, "code", sizeof("code")-1, *pdo_err);

    array_init(&info);

    add_next_index_string(&info, *pdo_err);
    add_next_index_long(&info, 0);
    zend_update_property(pdo_ex, &ex, "errorInfo", sizeof("errorInfo")-1, &info);
    zval_ptr_dtor(&info);

    zend_throw_exception_object(&ex); // otherwise throw the exception
    }

    if (message) {
    efree(message);
    }
    }
    So as long as you wrap SQL calls in try/catch, quite a few SQL-related errors can in fact be caught.

    The template tag is only there for convenience.

    Closure scope

    this scope

    Non-prop attributes

    The component tag

    Equivalent ways of registering variables

    Breakpoint 2, 0x00007fffedb6c090 in swoole::Context::Context(unsigned long, void (*)(void*), void*)@plt ()
    from /usr/local/phpfork/lib/php/extensions/debug-non-zts-20170718/swoole.so
    (gdb) bt
    #0 0x00007fffedb6c090 in swoole::Context::Context(unsigned long, void (*)(void*), void*)@plt ()
    from /usr/local/phpfork/lib/php/extensions/debug-non-zts-20170718/swoole.so
    #1 0x00007fffedc207b1 in swoole::Coroutine::Coroutine (this=0x17a8540, fn=0x7fffedc1cdb2 <swoole::PHPCoroutine::main_func(void*)>, private_data=0x7fffffffa0a0)
    at /home/dinosaur/swoole-src/include/coroutine.h:204
    #2 0x00007fffedc205ee in swoole::Coroutine::create (fn=0x7fffedc1cdb2 <swoole::PHPCoroutine::main_func(void*)>, args=0x7fffffffa0a0)
    at /home/dinosaur/swoole-src/include/coroutine.h:121
    #3 0x00007fffedc1d7d0 in swoole::PHPCoroutine::create (fci_cache=0x7fffffffa140, argc=0, argv=0x0) at /home/dinosaur/swoole-src/swoole_coroutine.cc:857
    #4 0x00007fffedc1eebd in zif_swoole_coroutine_create (execute_data=0x7fffef61e090, return_value=0x7fffffffa1e0)
    at /home/dinosaur/swoole-src/swoole_coroutine.cc:964
    #5 0x0000000000aaf137 in ZEND_DO_FCALL_BY_NAME_SPEC_RETVAL_UNUSED_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:738
    #6 0x0000000000b42992 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59743
    #7 0x0000000000b47d9d in zend_execute (op_array=0x7fffef684b00, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
    #8 0x0000000000a3afe0 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
    #9 0x000000000098c749 in php_execute_script (primary_file=0x7fffffffc8c0) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
    #10 0x0000000000b4b2a5 in do_cli (argc=2, argv=0x1561f20) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
    #11 0x0000000000b4c491 in main (argc=2, argv=0x1561f20) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404


    CLONE_NEWUTS (since Linux 2.6.19)
    If CLONE_NEWUTS is set, then create the process in a new UTS
    namespace, whose identifiers are initialized by duplicating
    the identifiers from the UTS namespace of the calling process.
    If this flag is not set, then (as with fork(2)) the process is
    created in the same UTS namespace as the calling process.


    The type system

    The book Types and Programming Languages introduces type systems. What is a type system?

    Why does implicit conversion exist?

    I have written weakly typed languages for a long time and keep running into all kinds of implicit conversions.

    But recently (2020/04/26) I think I roughly understand the essence of implicit conversion.
    At bottom, a weakly typed language is still a typed language: variables have types, and a variable is an element of the set its type denotes.

    For example, take a set of colors {red, yellow, white, blue}.
    Variables of different types are, in the end, not members of the same set and cannot be compared directly. So the compiler does a mapping for you, maps them into one type, and then they can be compared. It is that simple.

    So what is the problem with implicit conversion? It is that the developer may not notice that a conversion happened, so the execution path differs from what was expected.

    And what is the benefit? You write a lot less code, and you move faster.

    The book also covers expressions and evaluation.

    SQL is a weakly typed language too, so it has the big weak-typing pitfall of implicit conversion as well.
    Has anyone described MySQL's type system in detail? Is there a document for it, something like the JLS? Maybe I just haven't read the complete MySQL documentation.

    MySQL types

    MySQL types fall into the following groups:

    • numeric
    • date and time
    • string
    • json

    Example
    select count(case when number_col='' OR number_col IS NULL THEN 1 END) FROM test;
    If the number_col column has a numeric type (say int), an implicit conversion happens in number_col = '': the empty string '' is converted to 0.

    When does implicit conversion happen?

    The SQL in question:
    select 1='222';
    Thread 28 "mysqld" hit Breakpoint 1, my_strtod (str=0x7f3a500061d0 "222", end=0x7f3ad4d46998, error=0x7f3ad4d469bc) at /home/dinosaur/Downloads/mysql-5.7.21/strings/dtoa.c:465
    465 {
    (gdb) bt
    #0 my_strtod (str=0x7f3a500061d0 "222", end=0x7f3ad4d46998, error=0x7f3ad4d469bc) at /home/dinosaur/Downloads/mysql-5.7.21/strings/dtoa.c:465
    #1 0x0000000001f7279d in my_strntod_8bit (cs=0x2e8ea60 <my_charset_utf8_general_ci>, str=0x7f3a500061d0 "222", length=3, end=0x7f3ad4d46998, err=0x7f3ad4d469bc)
    at /home/dinosaur/Downloads/mysql-5.7.21/strings/ctype-simple.c:741
    #2 0x0000000000fdaaf2 in double_from_string_with_check (cs=0x2e8ea60 <my_charset_utf8_general_ci>, cptr=0x7f3a500061d0 "222", end=0x7f3a500061d3 "")
    at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:3577
    #3 0x0000000000fdacc5 in Item_string::val_real (this=0x7f3a500061d8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:3594
    #4 0x0000000000f9e9b9 in Item::val_result (this=0x7f3a500061d8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.h:1592
    #5 0x0000000000fedf4b in Item_cache_real::cache_value (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:10089
    #6 0x0000000000fec91a in Item_cache::has_value (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:9650
    #7 0x0000000000fedfbb in Item_cache_real::val_real (this=0x7f3a50006928) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:10098
    #8 0x0000000000fff539 in Arg_comparator::compare_real (this=0x7f3a500065f8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.cc:1748
    #9 0x0000000001014cc8 in Arg_comparator::compare (this=0x7f3a500065f8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.h:92
    #10 0x00000000010017e7 in Item_func_eq::val_int (this=0x7f3a50006520) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item_cmpfunc.cc:2507
    #11 0x0000000000fe6144 in Item::send (this=0x7f3a50006520, protocol=0x7f3a50001d10, buffer=0x7f3ad4d46e10) at /home/dinosaur/Downloads/mysql-5.7.21/sql/item.cc:7563
    #12 0x00000000015d4c48 in THD::send_result_set_row (this=0x7f3a50000b70, row_items=0x7f3a500058c8) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_class.cc:4677
    #13 0x00000000015ceed3 in Query_result_send::send_data (this=0x7f3a50006770, items=...) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_class.cc:2717
    #14 0x00000000015e697a in JOIN::exec (this=0x7f3a500069f0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_executor.cc:158
    #15 0x00000000016892ba in handle_query (thd=0x7f3a50000b70, lex=0x7f3a50002e78, result=0x7f3a50006770, added_options=0, removed_options=0)
    at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_select.cc:184
    #16 0x000000000163939e in execute_sqlcom_select (thd=0x7f3a50000b70, all_tables=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5156
    #17 0x0000000001632405 in mysql_execute_command (thd=0x7f3a50000b70, first_level=true) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:2792
    #18 0x000000000163a31c in mysql_parse (thd=0x7f3a50000b70, parser_state=0x7f3ad4d48550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5582
    #19 0x000000000162f0a3 in dispatch_command (thd=0x7f3a50000b70, com_data=0x7f3ad4d48e00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
    #20 0x000000000162df32 in do_command (thd=0x7f3a50000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
    #21 0x0000000001770f97 in handle_connection (arg=0x5271810) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
    #22 0x0000000001de0b41 in pfs_spawn_thread (arg=0x526e200) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
    #23 0x00007f3ade33b6ba in start_thread (arg=0x7f3ad4d49700) at pthread_create.c:333
    #24 0x00007f3add76d41d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
    Implicit conversion rules

    If one or both arguments are NULL, the result of the comparison is NULL, except for the NULL-safe <=> equality comparison operator. For NULL <=> NULL, the result is true. No conversion is needed.


    If both arguments in a comparison operation are strings, they are compared as strings.


    If both arguments are integers, they are compared as integers.


    Hexadecimal values are treated as binary strings if not compared to a number.


    If one of the arguments is a TIMESTAMP or DATETIME column and the other argument is a constant, the constant is converted to a timestamp before the comparison is performed. This is done to be more ODBC-friendly. This is not done for the arguments to IN(). To be safe, always use complete datetime, date, or time strings when doing comparisons. For example, to achieve best results when using BETWEEN with date or time values, use CAST() to explicitly convert the values to the desired data type.


    A single-row subquery from a table or tables is not considered a constant. For example, if a subquery returns an integer to be compared to a DATETIME value, the comparison is done as two integers. The integer is not converted to a temporal value. To compare the operands as DATETIME values, use CAST() to explicitly convert the subquery value to DATETIME.


    If one of the arguments is a decimal value, comparison depends on the other argument. The arguments are compared as decimal values if the other argument is a decimal or integer value, or as floating-point values if the other argument is a floating-point value.


    In all other cases, the arguments are compared as floating-point (real) numbers. For example, a comparison of string and numeric operands takes places as a comparison of floating-point numbers.


    MySQL implicit conversion may skip the index

    The documentation only describes the string-to-number case.

    Example

    Below is the example table. First look at its definition; the underlying_code column is varchar:

    show create table `base_underlying_information`

    CREATE TABLE `base_underlying_information` (
      `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'primary key',
      `sec_id` varchar(10) NOT NULL COMMENT 'security id',
      `uni_code` varchar(30) NOT NULL COMMENT 'identifier, rule: code-last-type',
      `underlying_code` varchar(50) NOT NULL COMMENT 'underlying code',
      ... a bunch of other columns omitted
      PRIMARY KEY (`id`),

      KEY `idx_underlying_code` (`underlying_code`),
    ) ENGINE=InnoDB  CHARSET=utf8
    - When implicit conversion happens
      When the condition in the SQL is the number 603023:

      EXPLAIN SELECT * FROM `base_underlying_information` WHERE underlying_code = 603023

      The explain output shows that the index is not used, because the following rule applies:

      > In all other cases, the arguments are compared as floating-point (real) numbers. For example, a comparison of string and numeric operands takes places as a comparison of floating-point numbers.

      id select_type table partitions type possible_keys key key_len ref rows filtered Extra
      1 SIMPLE base_underlying_information \N ALL idx_underlying_code \N \N \N 506079 10.00 Using where

      An implicit conversion happened.

      The documentation describes it as follows:

      > For comparisons of a string column with a number, MySQL cannot use an index on the column to look up the value quickly. If str_col is an indexed string column, the index cannot be used when performing the lookup in the following statement:

      SELECT * FROM tbl_name WHERE str_col=1;

      > The reason for this is that there are many different strings that may convert to the value 1, such as '1', ' 1', or '1a'.

    - When there is no implicit conversion
      The column is varchar and the condition is also varchar, so there is no implicit conversion:

      EXPLAIN SELECT * FROM `base_underlying_information` WHERE underlying_code = '603023'

      id select_type table partitions type possible_keys key key_len ref rows filtered Extra
      1 SIMPLE base_underlying_information \N ref idx_underlying_code idx_underlying_code 152 const 1 100.00 \N

    I looked at the lexer and it does not seem to do the conversion, so the conversion must happen at runtime; which function does it, then?

    - http://postgres.cn/docs/9.6/extend-type-system.html
    - https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html
    - https://blog.csdn.net/n88Lpo/article/details/101013055
    - https://dev.mysql.com/doc/refman/5.7/en/type-conversion.html

    docker stop is essentially a kill -9, just a particular signal; the exact implementation requires reading the code.
    // todo


    Installing the ik analyzer plugin on Elasticsearch
    ./bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.4.0/elasticsearch-analysis-ik-7.4.0.zip
    It returns:
    -> Downloading https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.4.0/elasticsearch-analysis-ik-7.4.0.zip
    The core tokenization function

    Related reading

    diff --git a/page/24/index.html b/page/24/index.html

    I was curious how EXPLAIN manages to report so much, so when I ran into an EXPLAIN result of Impossible ON condition I got curious.


    tokenlizer

    Code:
    <?php
    $code = '<?php echo "string1"."string2"; >';

    $tokens = token_get_all($code);

    foreach ($tokens as $token) {

    if (is_array($token)) {

    // line number, token name, and the corresponding text

    printf("%d - %s\t%s\n", $token[2], token_name($token[0]), $token[1]);

    }

    }
    Output:
    1 - T_OPEN_TAG  <?php
    1 - T_ECHO echo
    1 - T_WHITESPACE
    1 - T_CONSTANT_ENCAPSED_STRING "string1"
    1 - T_CONSTANT_ENCAPSED_STRING "string2"
    1 - T_WHITESPACE
    php-cs-fixer

    The core function of php-cs-fixer is token_get_all:
    $tokens = \defined('TOKEN_PARSE')
    ? token_get_all($code, TOKEN_PARSE)
    : token_get_all($code);
    The core call stack:
    #0 E:\PHP-CS-Fixer\src\Tokenizer\Tokens.php(222): PhpCsFixer\Tokenizer\Tokens->setCode('<?php\n\n/*\n * Th...')
    #1 E:\PHP-CS-Fixer\src\Runner\Runner.php(171): PhpCsFixer\Tokenizer\Tokens::fromCode('<?php\n\n/*\n * Th...')
    #2 E:\PHP-CS-Fixer\src\Runner\Runner.php(132): PhpCsFixer\Runner\Runner->fixFile(Object(SplFileInfo), Object(PhpCsFixer\Linter\ProcessLintingResult))
    #3 E:\PHP-CS-Fixer\src\Console\Command\FixCommand.php(219): PhpCsFixer\Runner\Runner->fix()
    #4 E:\PHP-CS-Fixer\vendor\symfony\console\Command\Command.php(255): PhpCsFixer\Console\Command\FixCommand->execute(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #5 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(982): Symfony\Component\Console\Command\Command->run(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #6 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(255): Symfony\Component\Console\Application->doRunCommand(Object(PhpCsFixer\Console\Command\FixCommand), Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #7 E:\PHP-CS-Fixer\src\Console\Application.php(84): Symfony\Component\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #8 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(148): PhpCsFixer\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #9 E:\PHP-CS-Fixer\php-cs-fixer(101): Symfony\Component\Console\Application->run()
    #10 {main}
    For example, the stack below:
    #0 E:\PHP-CS-Fixer\src\Fixer\Operator\BinaryOperatorSpacesFixer.php(339): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->fixWhiteSpaceAroundOperatorToSingleSpace(Object(PhpCsFixer\Tokenizer\Tokens), 19)
    #1 E:\PHP-CS-Fixer\src\Fixer\Operator\BinaryOperatorSpacesFixer.php(256): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->fixWhiteSpaceAroundOperator(Object(PhpCsFixer\Tokenizer\Tokens), 19)
    #2 E:\PHP-CS-Fixer\src\AbstractFixer.php(75): PhpCsFixer\Fixer\Operator\BinaryOperatorSpacesFixer->applyFix(Object(SplFileInfo), Object(PhpCsFixer\Tokenizer\Tokens))
    #3 E:\PHP-CS-Fixer\src\Runner\Runner.php(192): PhpCsFixer\AbstractFixer->fix(Object(SplFileInfo), Object(PhpCsFixer\Tokenizer\Tokens))
    #4 E:\PHP-CS-Fixer\src\Runner\Runner.php(132): PhpCsFixer\Runner\Runner->fixFile(Object(SplFileInfo), Object(PhpCsFixer\Linter\ProcessLintingResult))
    #5 E:\PHP-CS-Fixer\src\Console\Command\FixCommand.php(219): PhpCsFixer\Runner\Runner->fix()
    #6 E:\PHP-CS-Fixer\vendor\symfony\console\Command\Command.php(255): PhpCsFixer\Console\Command\FixCommand->execute(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #7 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(982): Symfony\Component\Console\Command\Command->run(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #8 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(255): Symfony\Component\Console\Application->doRunCommand(Object(PhpCsFixer\Console\Command\FixCommand), Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #9 E:\PHP-CS-Fixer\src\Console\Application.php(84): Symfony\Component\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #10 E:\PHP-CS-Fixer\vendor\symfony\console\Application.php(148): PhpCsFixer\Console\Application->doRun(Object(Symfony\Component\Console\Input\ArgvInput), Object(Symfony\Component\Console\Output\ConsoleOutput))
    #11 E:\PHP-CS-Fixer\php-cs-fixer(101): Symfony\Component\Console\Application->run()
    #12 {main}
    The core is inserting tokens after the existing ones.
    // todo


    How to create a bridge

    You can create a bridge with brctl from the bridge-utils package:
    $sudo brctl addbr br0
    Then brctl show lists the bridges:
    $brctl  show
    bridge name bridge id STP enabled interfaces
    br0 8000.000000000000 no

    Looking at the system call with strace
    $sudo strace  brctl addbr br1
    Output
    ubuntu@VM-0-3-ubuntu:~/libnlbuild/bin$ sudo strace  brctl addbr br1
    ...
    socket(AF_UNIX, SOCK_STREAM, 0) = 3
    ioctl(3, SIOCBRADDBR, "br1") = 0
    +++ exited with 0 +++
    We see the call
    ioctl(3, SIOCBRADDBR, "br1") 
    The 3 is the file descriptor that was opened. 0, 1, and 2 are the special descriptors for standard input, standard output, and standard error, so the next file opened gets 3.

    A small bridge-creation example I wrote:
    //  bradd.c
    #include <linux/sockios.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <stdio.h>
    int main(){
        int br_socket_fd, ret;
        if ((br_socket_fd = socket(AF_LOCAL, SOCK_STREAM, 0)) < 0) {  // parenthesize the assignment before comparing
            perror("Error: ");
        }
        if ((ret = ioctl(br_socket_fd, SIOCBRADDBR, "hello")) < 0)    // SIOCBRADDBR comes from sockios.h
        {
            perror("ioctl error");
        }
        return 0;
    }
    $gcc bradd.c -o bradd
    ## adding a bridge requires sudo
    $sudo ./bradd
    Then brctl show shows that a bridge named hello has been created:
    $ brctl show
    bridge name bridge id STP enabled interfaces
    docker0 8000.024273119fd1 no vethe6cf6a0
    hello 8000.000000000000 no
    We notice that docker0 and hello differ by one entry in the interfaces column. How do we attach a veth?

    • In brctl this is brctl addif; its implementation is br_add_interface:
      int br_add_interface(const char *bridge, const char *dev)
      {
      struct ifreq ifr;
      ...
      int ifindex = if_nametoindex(dev);
      ...
      strncpy(ifr.ifr_name, bridge, IFNAMSIZ);
      ifr.ifr_ifindex = ifindex;
      err = ioctl(br_socket_fd, SIOCBRADDIF, &ifr);
      ...
      }
    This ends up calling br_add_if in the kernel's net/bridge/br_if.c:

    // dev is the device we want to add
    // br is our bridge
    /* called with RTNL */
    int br_add_if(struct net_bridge *br, struct net_device *dev,
    struct netlink_ext_ack *extack)
    {
    struct net_bridge_port *p;
    int err = 0;
    unsigned br_hr, dev_hr;
    bool changed_addr;

    ...
    p = new_nbp(br, dev);
    if (IS_ERR(p))
    return PTR_ERR(p);

    call_netdevice_notifiers(NETDEV_JOIN, dev);

    err = dev_set_allmulti(dev, 1);
    if (err) {
    kfree(p); /* kobject not yet init'd, manually free */
    goto err1;
    }

    err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
    SYSFS_BRIDGE_PORT_ATTR);
    if (err)
    goto err2;

    err = br_sysfs_addif(p);
    if (err)
    goto err2;

    err = br_netpoll_enable(p);
    if (err)
    goto err3;

    err = netdev_rx_handler_register(dev, br_handle_frame, p);
    if (err)
    goto err4;

    dev->priv_flags |= IFF_BRIDGE_PORT;

    err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
    if (err)
    goto err5;

    err = nbp_switchdev_mark_set(p);
    if (err)
    goto err6;

    dev_disable_lro(dev);

    list_add_rcu(&p->list, &br->port_list);

    nbp_update_port_count(br);

    netdev_update_features(br->dev);

    br_hr = br->dev->needed_headroom;
    dev_hr = netdev_get_fwd_headroom(dev);
    if (br_hr < dev_hr)
    update_headroom(br, dev_hr);
    else
    netdev_set_rx_headroom(dev, br_hr);

    if (br_fdb_insert(br, p, dev->dev_addr, 0))
    netdev_err(dev, "failed insert local address bridge forwarding table\n");

    if (br->dev->addr_assign_type != NET_ADDR_SET) {
    /* Ask for permission to use this MAC address now, even if we
    * don't end up choosing it below.
    */
    err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
    if (err)
    goto err7;
    }

    err = nbp_vlan_init(p, extack);
    if (err) {
    netdev_err(dev, "failed to initialize vlan filtering on this port\n");
    goto err7;
    }

    spin_lock_bh(&br->lock);
    changed_addr = br_stp_recalculate_bridge_id(br);

    if (netif_running(dev) && netif_oper_up(dev) &&
    (br->dev->flags & IFF_UP))
    br_stp_enable_port(p);
    spin_unlock_bh(&br->lock);

    br_ifinfo_notify(RTM_NEWLINK, NULL, p);

    if (changed_addr)
    call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

    br_mtu_auto_adjust(br);
    br_set_gso_limits(br);

    kobject_uevent(&p->kobj, KOBJ_ADD);

    return 0;
    ...
    }
    Adding the virtual (veth) devices:
    # strace  ip link add vethaaa type veth peer name vethbbb
    execve("/sbin/ip", ["ip", "link", "add", "vethaaa", "type", "veth", "peer", "name", "vethbbb"], 0x7ffed8af30f0 /* 23 vars */)
    ...
    socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC, NETLINK_ROUTE) = 3
    setsockopt(3, SOL_SOCKET, SO_SNDBUF, [32768], 4) = 0
    setsockopt(3, SOL_SOCKET, SO_RCVBUF, [1048576], 4) = 0
    setsockopt(3, SOL_NETLINK, NETLINK_EXT_ACK, [1], 4) = 0
    bind(3, {sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, 12) = 0
    getsockname(3, {sa_family=AF_NETLINK, nl_pid=26226, nl_groups=00000000}, [12]) = 0
    sendto(3, {{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}, 32, 0, NULL, 0) = 32
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=52, type=NLMSG_ERROR, flags=0, seq=0, pid=26226}, {error=-ENODEV, msg={{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}}}, iov_len=16384}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 52
    access("/proc/net", R_OK) = 0
    access("/proc/net/unix", R_OK) = 0
    socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0) = 4
    ioctl(4, SIOCGIFINDEX, {ifr_name="vethaaa"}) = -1 ENODEV (No such device)
    close(4) = 0
    brk(NULL) = 0x560e12455000
    brk(0x560e12476000) = 0x560e12476000
    openat(AT_FDCWD, "/usr/lib/ip/link_veth.so", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
    sendmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=92, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576836139, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=12, nla_type=IFLA_IFNAME}, "vethaaa"}, {{nla_len=48, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=36, nla_type=IFLA_INFO_DATA}, "\x20\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x03\x00\x76\x65\x74\x68\x62\x62\x62\x00"}]}]}, iov_len=92}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 92
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 36
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=36, type=NLMSG_ERROR, flags=NLM_F_CAPPED, seq=1576836139, pid=26226}, {error=0, msg={len=92, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576836139, pid=0}}}, iov_len=36}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 36

Issued without the required privileges (here creating a p1/p2 pair), the same kind of request is rejected with EPERM:
    socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC, NETLINK_ROUTE) = 3
    setsockopt(3, SOL_SOCKET, SO_SNDBUF, [32768], 4) = 0
    setsockopt(3, SOL_SOCKET, SO_RCVBUF, [1048576], 4) = 0
    setsockopt(3, SOL_NETLINK, NETLINK_EXT_ACK, [1], 4) = 0
    bind(3, {sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, 12) = 0
    getsockname(3, {sa_family=AF_NETLINK, nl_pid=18263, nl_groups=00000000}, [12]) = 0
    sendto(3, {{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}, 32, 0, NULL, 0) = 32
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=52, type=NLMSG_ERROR, flags=0, seq=0, pid=18263}, {error=-EPERM, msg={{len=32, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK, seq=0, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}}}}, iov_len=16384}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 52
    access("/proc/net", R_OK) = 0
    access("/proc/net/unix", R_OK) = 0
    socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0) = 4
    ioctl(4, SIOCGIFINDEX, {ifr_name="p1"}) = -1 ENODEV (No such device)
    close(4) = 0
    brk(NULL) = 0x5595d01bb000
    brk(0x5595d01dc000) = 0x5595d01dc000
    openat(AT_FDCWD, "/usr/lib/ip/link_veth.so", O_RDONLY|O_CLOEXEC) = -1 ENOENT (No such file or directory)
    sendmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=84, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576748752, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=7, nla_type=IFLA_IFNAME}, "p1"}, {{nla_len=44, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=32, nla_type=IFLA_INFO_DATA}, "\x1c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x03\x00\x70\x32\x00\x00"}]}]}, iov_len=84}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 84
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 104
    recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base={{len=104, type=NLMSG_ERROR, flags=0, seq=1576748752, pid=18263}, {error=-EPERM, msg={{len=84, type=RTM_NEWLINK, flags=NLM_F_REQUEST|NLM_F_ACK|NLM_F_EXCL|NLM_F_CREATE, seq=1576748752, pid=0}, {ifi_family=AF_UNSPEC, ifi_type=ARPHRD_NETROM, ifi_index=0, ifi_flags=0, ifi_change=0}, [{{nla_len=7, nla_type=IFLA_IFNAME}, "p1"}, {{nla_len=44, nla_type=IFLA_LINKINFO}, [{{nla_len=8, nla_type=IFLA_INFO_KIND}, "veth"...}, {{nla_len=32, nla_type=IFLA_INFO_DATA}, "\x1c\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x03\x00\x70\x32\x00\x00"}]}]}}}, iov_len=104}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 104
    write(2, "RTNETLINK answers: Operation not"..., 43RTNETLINK answers: Operation not permitted
    ) = 43
    exit_group(2) = ?
    +++ exited with 2 +++

The relevant netlink/veth code in Linux:
// drivers/net/veth.c
    static struct rtnl_link_ops veth_link_ops = {
    .kind = DRV_NAME,
    .priv_size = sizeof(struct veth_priv),
    .setup = veth_setup,
    .validate = veth_validate,
    .newlink = veth_newlink,
    .dellink = veth_dellink,
    .policy = veth_policy,
    .maxtype = VETH_INFO_MAX,
    .get_link_net = veth_get_link_net,
    };
// net/netlink/af_netlink.c
    static const struct proto_ops netlink_ops = {
    .family = PF_NETLINK,
    .owner = THIS_MODULE,
    .release = netlink_release,
    .bind = netlink_bind,
    .connect = netlink_connect,
    .socketpair = sock_no_socketpair,
    .accept = sock_no_accept,
    .getname = netlink_getname,
    .poll = datagram_poll,
    .ioctl = netlink_ioctl,
    .listen = sock_no_listen,
    .shutdown = sock_no_shutdown,
    .setsockopt = netlink_setsockopt,
    .getsockopt = netlink_getsockopt,
    .sendmsg = netlink_sendmsg,
    .recvmsg = netlink_recvmsg,
    .mmap = sock_no_mmap,
    .sendpage = sock_no_sendpage,
    };
Adding a veth device

First, the socket is created:
    (gdb) bt
    #0 socket () at ../sysdeps/unix/syscall-template.S:78
    #1 0x00005555555b60c7 in rtnl_open_byproto (rth=0x5555557d8020 <rth>, subscriptions=0, protocol=<optimized out>) at libnetlink.c:194
    #2 0x000055555555f956 in main (argc=9, argv=0x7fffffffe548) at ip.c:308
    Breakpoint 6, __libc_sendmsg (fd=3, msg=msg@entry=0x7fffffffdd70, flags=flags@entry=0) at ../sysdeps/unix/sysv/linux/sendmsg.c:28
    28 ../sysdeps/unix/sysv/linux/sendmsg.c: No such file or directory.
    (gdb) bt
    #0 __libc_sendmsg (fd=3, msg=msg@entry=0x7fffffffdd70, flags=flags@entry=0) at ../sysdeps/unix/sysv/linux/sendmsg.c:28
    #1 0x00005555555b5c8f in __rtnl_talk_iov (rtnl=0x5555557d8020 <rth>, iov=iov@entry=0x7fffffffddf0, iovlen=iovlen@entry=1, answer=answer@entry=0x0, show_rtnl_err=show_rtnl_err@entry=true,
    errfn=0x0) at libnetlink.c:887
    #2 0x00005555555b7225 in __rtnl_talk (errfn=0x0, show_rtnl_err=true, answer=<optimized out>, n=0x7fffffffde40, rtnl=<optimized out>) at libnetlink.c:1000
    #3 rtnl_talk (rtnl=<optimized out>, n=n@entry=0x7fffffffde40, answer=answer@entry=0x0) at libnetlink.c:1006
    #4 0x000055555557bc6e in iplink_modify (cmd=cmd@entry=16, flags=flags@entry=1536, argc=3, argc@entry=6, argv=<optimized out>, argv@entry=0x7fffffffe560) at iplink.c:1084
    #5 0x000055555557c0c6 in do_iplink (argc=7, argv=0x7fffffffe558) at iplink.c:1641
    #6 0x000055555555ff0c in do_cmd (argv0=0x7fffffffe7d8 "link", argc=8, argv=0x7fffffffe550) at ip.c:113
    #7 0x000055555555f9a0 in main (argc=9, argv=0x7fffffffe548) at ip.c:317
For example, with the command ip link add veth_0 type veth peer name veth_0_peer, req.n starts out with a length of 32:
     p req.n.nlmsg_len 
    $1 = 32
After ret = iplink_parse(argc, argv, &req, &type); the length grows to 44:
    (gdb) p ((char *)n)[32]@64
    $50 = "\v\000\003\000veth_0\000\000\064\000\022\000\b\000\001\000veth(\000\002\000$\000\001", '\000' <repeats 17 times>, "\020\000\003\000veth_0_peer"
What is iptables?
    # type iptables
    iptables is hashed (/sbin/iptables)
Why can the iptables command do all of that?

How iptables works

iptables does its special communication with the kernel through a (netlink) socket and changes the relevant hooks of the netfilter subsystem.

Source code

Further reading
An exception example
import java.rmi.RemoteException;
    public class className
    {
    public void deposit(double amount) throws RemoteException
    {
    // Method implementation
    throw new RemoteException();
    }
    //Remainder of class definition
    }
What is an exception, really?

Essentially, it is a class that can capture the call stack; that ability is what makes it special, and the heart of exception handling is capturing the stack and catching the throw.

Checked exceptions

Source


    The unchecked exception classes are the run-time exception classes and the error classes.


    The checked exception classes are all exception classes other than the unchecked exception classes. That is, the checked exception classes are Throwable and all its subclasses other than RuntimeException and its subclasses and Error and its subclasses.

Unchecked exceptions are the run-time exception classes and the error classes; everything else is a checked exception.


The version here is Java 8.

Primitive types and objects

What is a Java String?
Clearly, an object.

The specialized + operator

    15.18.1. String Concatenation Operator +
    If only one operand expression is of type String, then string conversion (§5.1.11) is performed on the other operand to produce a string at run time.
Source

Java's string concatenation operator is +, whereas PHP's is the dot (.).

StringBuilder

String constant folding

The JLS 8 passage on the String Concatenation Operator + quoted above only says that when exactly one operand is a String, the other is converted to a string at run time; it does not prescribe what happens when both operands are constant strings, so javac simply folds them at compile time.

    /** If tree is a concatenation of string literals, replace it
    * by a single literal representing the concatenated string.
    */
    protected JCExpression foldStrings(JCExpression tree) {
    if (!allowStringFolding)
    return tree;
    ListBuffer<JCExpression> opStack = new ListBuffer<>();
    ListBuffer<JCLiteral> litBuf = new ListBuffer<>();
    boolean needsFolding = false;
    JCExpression curr = tree;
    while (true) {
    if (curr.hasTag(JCTree.Tag.PLUS)) {
    JCBinary op = (JCBinary)curr;
    needsFolding |= foldIfNeeded(op.rhs, litBuf, opStack, false);
    curr = op.lhs;
    } else {
    needsFolding |= foldIfNeeded(curr, litBuf, opStack, true);
    break; //last one!
    }
    }
    if (needsFolding) {
    List<JCExpression> ops = opStack.toList();
    JCExpression res = ops.head;
    for (JCExpression op : ops.tail) {
    res = F.at(op.getStartPosition()).Binary(optag(TokenKind.PLUS), res, op);
    storeEnd(res, getEndPos(op));
    }
    return res;
    } else {
    return tree;
}
}

[foldStrings](https://github.com/openjdk/jdk/blob/6bab0f539fba8fb441697846347597b4a0ade428/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java#L950)
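To see the folding in practice, one option (the file name Test.java is made up for illustration) is to compile a class containing something like String s = "a" + "b"; and inspect the class file — the constant pool ends up holding the already-concatenated literal rather than a runtime concatenation:

javac Test.java
javap -v -c Test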

Further reading

PhpStorm often warns about a missing ext-json; I only recently learned that composer.json can declare required PHP extensions.

For example:
    "require": {
    "php": ">=5.4.0",
    "topthink/framework": "^5.0",
    "php-imap/php-imap": "~2.0",
    "phpoffice/phpspreadsheet": "^1.3",
    "hprose/hprose": "^2.0",
    "ext-json": "*" // 这就是解析require json 扩展
    },
This entry checks whether the json extension is present. How does Composer implement the check? It calls the extension_loaded function to look the extension up.

The check lives around line 129 in Composer, where extension_loaded is used to detect the extension.
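A minimal sketch of the idea (not Composer's actual code) — the platform check boils down to calls like these:

<?php
// rough check for an "ext-json" requirement
var_dump(extension_loaded('json')); // true when ext-json is loaded
var_dump(phpversion('json'));       // the extension's version string, or false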

Further reading

Having written plain PHP for a long time, my fear of pitfalls like relative-path autoloading runs deep.
Java and Golang have similar issues; since I have not written Java for long, let me note this down first.

Oracle's documentation describes the relationship between class names and the class path:

    Class Path and Package Names


    Java classes are organized into packages that are mapped to directories in the file system. But, unlike the file system, whenever you specify a package name, you specify the whole package name and never part of it. For example, the package name for java.awt.Button is always specified as java.awt.


    For example, suppose you want the JRE to find a class named Cool.class in the package utility.myapp. If the path to that directory is /java/MyClasses/utility/myapp, then you would set the class path so that it contains /java/MyClasses. To run that application, you could use the following java command:


    java -classpath /java/MyClasses utility.myapp.Cool
    When the application runs, the JVM uses the class path settings to find any other classes defined in the utility.myapp package that are used by the Cool class.


    The entire package name is specified in the command. It is not possible, for example, to set the class path so it contains /java/MyClasses/utility and use the command java myapp.Cool. The class would not be found.


    You might wonder what defines the package name for a class. The answer is that the package name is part of the class and cannot be modified, except by recompiling the class.

First, compilation. Look at javac's help:
    javac --help
    Usage: javac <options> <source files>
So javac's usage is: javac followed by the path to the source files.

An example — we are currently in the directory one level above com.

Here is the code of HelloWorld.java:

    package com.helloworld;

    public class HelloWorld
    {
    static public int m = 1;
    public int i = 1;

    }
    # ls 
    com
    # tree
    .
    └── com
    └── helloworld
    └── HelloWorld.java

How do I compile the file at com/helloworld/HelloWorld.java?

Like this:
    javac com/helloworld/*.java
Then look at the directory tree again; a class file has appeared:
    # tree
    .
    └── com
    └── helloworld
    ├── HelloWorld.class
    └── HelloWorld.java

Starting over, let's see what the -d option is for:
    # mkdir classes
    # tree
    .
    ├── classess
    └── com
    └── helloworld
    └── HelloWorld.java
After compiling, the package directories are created under the directory passed to -d:
    # javac -d ./classes/    com/helloworld/*.java
    # tree
    .
    ├── classes
    │   └── com
    │   └── helloworld
    │   └── HelloWorld.class
    ├── classess
    └── com
    └── helloworld
    └── HelloWorld.java
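To actually run the class, point -classpath at the output directory and use the fully qualified name, exactly as the Oracle quote above describes — assuming a main method were added to HelloWorld, the invocation would be:

java -classpath ./classes com.helloworld.HelloWorld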
Further reading

Abstract Syntax Tree (AST)

The previous post briefly introduced lex and yacc.

This time the focus is on constructing an abstract syntax tree.

For example, 1+2+3 is built (left-associatively, i.e. (1+2)+3) into a tree roughly like:

      +
     / \
    +   3
   / \
  1   2

Composer and PSR-4

This post is mainly about Composer's PSR-4 autoloading. PHP carries a lot of historical baggage and has had to make many compromises; namespaces and autoloading are among them.

The big pitfall of include and require

An example

The difference between include and require may still be a favourite interview question, but both share one fatal trap: a relative path passed to include or require is resolved against the working directory.

Here is an example.

We are currently in the directory that contains index.php.

    # tree

    test
       ├── index.php
       ├── relative.php
       └── subdir
       ├── a.php
       └── relative.php

index.php is very simple: it just includes a path.
    <?php
    include "./subdir/a.php";
The two relative.php files each print their own path.
subdir/relative.php:
    <?php
    echo "test/subdir/relative.php"
    + +

And relative.php:
    <?php
    echo "test/relative.php";
So which relative.php gets included — the one next to index.php, or the one in subdir?

The answer:
    # php index.php 
    test/relative.php
It included the relative.php that sits in the same directory as index.php.

And if you execute it from one level up — the directory that contains test — it even reports an error:
    php test/index.php 
    PHP Warning: include(./subdir/a.php): failed to open stream: No such file or directory in /root/test/index.php on line 2
    PHP Warning: include(): Failed opening './subdir/a.php' for inclusion (include_path='.:/usr/share/php') in /root/test/index.php on line 2
All of this happens because, for a relative path, PHP calls getcwd() to obtain the working directory; pwd in your shell shows the same value.

Because of this treacherous behaviour, PHP code that hand-writes include with relative paths becomes very hard to maintain. Once one relative include exists, sooner or later someone will copy-and-paste the code, include and all, and that is the start of the next mess — so relative-path includes should be kept to a minimum.

Autoloading mitigates this kind of trap because it removes the need to hand-write relative includes; when people do include by hand, writing include __DIR__ . '/aaa/bbb/ccc.php' is much safer because the path is no longer relative.

Working-directory differences between CLI and CGI/FastCGI

The CLI SAPI does not change the current directory to the directory of the executed script.

The following example shows the difference between this module and the CGI SAPI:
    <?php
// simple test script named test.php
    echo getcwd(), "\n";
    ?>
With the CGI version, the output is:
    $ pwd
    /tmp

    $ php-cgi -f another_directory/test.php
    /tmp/another_directory
Clearly, PHP changed the current directory to the directory of the script it just ran.

With the CLI SAPI you get:
    $ pwd
    /tmp

    $ php -q another_directory/test.php
    /tmp
The include/require opcode and getcwd

After lexing and parsing, require and include compile to opcode 73, ZEND_INCLUDE_OR_EVAL. If the included path is relative, the engine eventually calls
    VCWD_GETCWD(cwd, MAXPATHLEN)
which in the end is the getcwd provided by glibc.

The getcwd system call

Every process's task_struct points to an fs_struct, which holds root and pwd. When you call getcwd(), glibc issues the system call, which reads the pwd field of fs_struct and returns it:

    static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
    struct path *pwd)
    {
    ...
    *root = fs->root;
    *pwd = fs->pwd;
    ...
    }
    SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
    {
    int error;
    struct path pwd, root;
    char *page = __getname();

    if (!page)
    return -ENOMEM;

    rcu_read_lock();
get_fs_root_and_pwd_rcu(current->fs, &root, &pwd); // each process has an fs_struct; its root and pwd fields describe the root directory and the working directory

    char *cwd = page + PATH_MAX;
    int buflen = PATH_MAX;

    prepend(&cwd, &buflen, "\0", 1);
    error = prepend_path(&pwd, &root, &cwd, &buflen);
    ...
copy_to_user(buf, cwd, len) // copy the resolved pwd back to user space
    ...

    }
Summary of include and require

When include or require is given a relative path, the path is resolved against getcwd(), i.e. the current working directory.

CGI and CLI modes also differ:

• In CLI mode the current directory is simply the shell's pwd.
• The CGI SAPI, unlike the CLI SAPI, switches the working directory once — to the directory of the initially executed PHP file.

Namespaces

What is a namespace?

Essentially just a set of qualifiers.

Why do we need namespaces? Because we reuse other people's code. If a library you pull in defines a hello function and you also define hello, you have a problem; with namespaces, as long as the namespaces differ, identical function names no longer clash.

Autoloading

Now for autoloading. What is autoloading?

Essentially it is dynamic include — include at run time.

How do we normally include files?

By hand: a pile of include statements, just like the example above. That carries at least two risks, listed below:

• A newcomer writes a relative path in an include.
• Everything must be included by hand, and plain include will re-include the same file, so you need include_once or require_once.

In terms of risk, a newcomer's relative-path include is by far the more dangerous; duplicate includes merely add a little checking overhead.

spl_autoload_register

The spl_autoload_* functions are the core of PHP's autoloading, and autoloading is implemented on top of spl_autoload_register:

    /* {{{ proto bool spl_autoload_register([mixed autoload_function [, bool throw [, bool prepend]]])
    Register given function as __autoload() implementation */
    PHP_FUNCTION(spl_autoload_register)
    {

    ...

    if (zend_hash_add_mem(SPL_G(autoload_functions), lc_name, &alfi, sizeof(autoload_func_info)) == NULL) {
    ...
    }
    ...
    } /* }}} */
The lookup path then calls zend_hash_exists(EG(class_table), lc_name) to check whether the class is already in the global EG(class_table).
spl_autoload_call below is an example:

    PHP_FUNCTION(spl_autoload_call)
    {

if (SPL_G(autoload_functions)) { // SPL_G(autoload_functions) is filled by spl_autoload_register
    int l_autoload_running = SPL_G(autoload_running);
    SPL_G(autoload_running) = 1;
    lc_name = zend_string_alloc(Z_STRLEN_P(class_name), 0);
    zend_str_tolower_copy(ZSTR_VAL(lc_name), Z_STRVAL_P(class_name), Z_STRLEN_P(class_name));
    zend_hash_internal_pointer_reset_ex(SPL_G(autoload_functions), &pos);
while (zend_hash_get_current_key_ex(SPL_G(autoload_functions), &func_name, &num_idx, &pos) == HASH_KEY_IS_STRING) { // iterate over the registered callbacks
alfi = zend_hash_get_current_data_ptr_ex(SPL_G(autoload_functions), &pos);
zend_call_method(Z_ISUNDEF(alfi->obj)? NULL : &alfi->obj, alfi->ce, &alfi->func_ptr, ZSTR_VAL(func_name), ZSTR_LEN(func_name), retval, 1, class_name, NULL); // invoke the registered callback

if (zend_hash_exists(EG(class_table), lc_name)) { // the callback found the class, so break out of the loop

    break;
    }
    zend_hash_move_forward_ex(SPL_G(autoload_functions), &pos);
    }
    ...
    }
    ..
    } /* }}} */
The autoloading flow is really quite simple. An autoloading example:
    <?php
    // test.php
    spl_autoload_register(function ($class) {
    include "$class" . '.php';
    });
    $obj = new ClassA();

And the class ClassA.php:
    <?php
    class ClassA{}
Here is the call stack:
    (gdb) bt
    #0 zif_spl_autoload_call (execute_data=0x7fffef61e0a0, return_value=0x7fffffffa2f0) at /home/dinosaur/Downloads/php-7.2.2/ext/spl/php_spl.c:393
    #1 0x0000000000932807 in zend_call_function (fci=0x7fffffffa330, fci_cache=0x7fffffffa300) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:833
    #2 0x0000000000933000 in zend_lookup_class_ex (name=0x7fffe6920b58, key=0x7fffe70e63f0, use_autoload=1) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:990
    #3 0x0000000000933dbd in zend_fetch_class_by_name (class_name=0x7fffe6920b58, key=0x7fffe70e63f0, fetch_type=512) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_execute_API.c:1425
    #4 0x00000000009b7e46 in ZEND_NEW_SPEC_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:3211
    #5 0x0000000000a380a4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59929
    #6 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef683300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
    #7 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
    #8 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffcaa0) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
    #9 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441a60) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
    #10 0x0000000000a40ee0 in main (argc=2, argv=0x1441a60) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404

So the core of autoloading is: when a class lookup misses, PHP calls spl_autoload_call, which invokes the registered autoload callbacks one by one, returning as soon as one of them has loaded the class, or giving up after every callback has been tried.

The PSR specs and PSR-4

PSR is short for PHP Standards Recommendations; PSR-4 and PSR-0 are both about autoloading.

In essence, PSR-4 specifies a simple substitution:
fully qualified class name: \Aura\Web\Response\Status
namespace prefix: Aura\Web
base directory: /path/to/aura-web/src/
resulting file path: /path/to/aura-web/src/Response/Status.php
PSR-4 specifies how to load a file: replace the namespace prefix of the fully qualified class name with its base directory, and the rest of the name maps to the path underneath.
For example, say the class to load is:
    \Aura\Web\Response\Status
You can then map Aura\Web to /path/to/aura-web/src/, and the class \Aura\Web\Response\Status will be looked up in /path/to/aura-web/src/Response/Status.php.
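A minimal sketch of that mapping as a hand-written autoloader (prefix and base directory taken from the example above; this is an illustration, not Composer's generated code):

<?php
spl_autoload_register(function ($class) {
    $prefix  = 'Aura\\Web\\';
    $baseDir = '/path/to/aura-web/src/';
    // only handle classes under our prefix; let other autoloaders try the rest
    if (strncmp($class, $prefix, strlen($prefix)) !== 0) {
        return;
    }
    // \Aura\Web\Response\Status -> /path/to/aura-web/src/Response/Status.php
    $file = $baseDir . str_replace('\\', '/', substr($class, strlen($prefix))) . '.php';
    if (is_file($file)) {
        require $file;
    }
});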

You could say it is a bit like an nginx location rule. Here is an nginx config:
    location ^~ /images/ {
    # match any request that starts with /images/ and stop searching; regular-expression locations are not tested.
    }
The PSR-4 rule for \Aura\Web\Response\Status above would then look roughly like this:
    location ^~ /Aura/Web/ {
        root /path/to/aura-web/src/;
    }
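In composer.json the same mapping is declared roughly like this (path reused from the example above):

{
    "autoload": {
        "psr-4": {
            "Aura\\Web\\": "/path/to/aura-web/src/"
        }
    }
}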
Further reading

PHP 7 exceptions, errors, and the associated pitfalls

PHP has a huge number of pitfalls: some between versions, some from historical baggage, and some from knowledge carried over from other languages that does not transfer.

Prerequisites

Throwable

    PHP 7 changes how most errors are reported by PHP. Instead of reporting errors through the traditional error reporting mechanism used by PHP 5, most errors are now reported by throwing Error exceptions.

In other words, in PHP 7 most errors are thrown as Error exceptions instead of going through PHP 5's traditional error-reporting mechanism.

Into the pitfalls

Example 1

• On PHP 7, a modulo-by-zero error becomes an exception:

<?php
// test.php
try {
echo 1%0;
} catch (DivisionByZeroError $e) {
echo "bbb";
}
?>

Then run it:

php test.php 
bbb

It prints bbb, i.e. the error was caught by try/catch.

Let's first look at how PHP manages to catch this error.

The stack looks like this:
    Breakpoint 1, zend_throw_exception_ex (exception_ce=0x14cfe70, code=0, format=0x1087ea4 "Modulo by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_exceptions.c:913
    913 {
    (gdb) bt
    #0 zend_throw_exception_ex (exception_ce=0x14cfe70, code=0, format=0x1087ea4 "Modulo by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_exceptions.c:913
    #1 0x00000000009b9feb in ZEND_MOD_SPEC_CONST_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:4270
    #2 0x0000000000a381e4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59989
    #3 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef684300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
    #4 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
    #5 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
    #6 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
    #7 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404
Further reading

Example 2

Still on PHP 7:
    <?php
    try {
echo 1/0; // the modulo is changed to division
    } catch (DivisionByZeroError $e) {
    echo "bbb";
    }
    ?>
Output:
    Warning: Division by zero in /home/dinosaur/test/test.php on line 3
    INF
Notice the difference?

① A warning was raised, and it was not caught by try/catch.

② The script kept running (and printed INF).

Let's look at the stack:

    (gdb) bt
    #0 zend_error (type=2, format=0x107dcfc "Division by zero") at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1105
    #1 0x000000000093fb5b in div_function (result=0x7fffef61e090, op1=0x7fffe70e61c0, op2=0x7fffe70e61d0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:1173
    #2 0x00000000009a82a0 in fast_div_function (result=0x7fffef61e090, op1=0x7fffe70e61c0, op2=0x7fffe70e61d0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:738
    #3 0x00000000009b9f22 in ZEND_DIV_SPEC_CONST_CONST_HANDLER () at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:4251
    #4 0x0000000000a381d4 in execute_ex (ex=0x7fffef61e030) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:59986
    #5 0x0000000000a3d0ab in zend_execute (op_array=0x7fffef684300, return_value=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_vm_execute.h:63760
    #6 0x000000000094cd22 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1496
    #7 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
    #8 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
    #9 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404

Digging to the bottom of zend_error, it ends in a write system call:
    if (Z_LVAL_P(op2) == 0) {
    zend_error(E_WARNING, "Division by zero");
    ZVAL_DOUBLE(result, ((double) Z_LVAL_P(op1) / (double) Z_LVAL_P(op2)));
    return SUCCESS;
    }
After zend_error the function simply returns, so the rest of the program keeps running.

Comparison and summary

1/0 does not throw an exception; it raises a warning and execution continues.

The pitfalls are:

• Not every error can be caught with try/catch.
• When an error is not caught, execution simply continues.
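If you do want 1/0 to be catchable on PHP 7, one common workaround (a sketch, not the only option) is to promote warnings to ErrorException with set_error_handler:

<?php
set_error_handler(function ($severity, $message, $file, $line) {
    // turn warnings such as "Division by zero" into exceptions
    throw new ErrorException($message, 0, $severity, $file, $line);
});
try {
    echo 1 / 0;
} catch (ErrorException $e) {
    echo "caught: " . $e->getMessage();
}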
diff --git a/page/25/index.html b/page/25/index.html
new file mode 100644
index 0000000000..fc05060216
--- /dev/null
+++ b/page/25/index.html
@@ -0,0 +1,1472 @@
Lexical analysis

lex is mainly used for lexical analysis — in short, splitting input into tokens.
Each call to yylex returns one token; Lucene's standard tokenizer likewise uses a lex-style package to split text, and once the tokens are produced Lucene builds its inverted index.

A lex example
    %{
    %}
    %%

    end { ECHO ;return 2 ;}

    aaa {ECHO ;}

.|\n {}

    %%
    int main(){
    yylex();
    }
    int yywrap(){
    return 1;
    }
Then run:
    lex test.lex
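Since the scanner above defines its own main and yywrap, the generated lex.yy.c compiles on its own (the output name scanner is arbitrary):

cc lex.yy.c -o scanner
echo "aaa end" | ./scanner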
Parsing (syntactic analysis)

What is syntactic analysis?

A grammar is a more powerful rule system than regular expressions: it can describe nested structure that regular expressions cannot express.

How does a parser choose? A key question in parsing is how, among several candidate productions, to pick one — exactly one.

bison is the GNU version of yacc.
Like flex, a bison file is divided into three parts separated by %%.
The open-source yacc on Linux is bison:
...definitions section...

%%

...rules section...

%%

...user subroutines section...

The first part holds C declarations, token declarations, nonterminal declarations, and so on.
The second part holds the productions and their semantic actions.
The third part holds the supporting C functions:

    /* Infix notation calculator--calc */

    %{
    #define YYSTYPE double
    #include <math.h>
    #include <stdio.h>
    %}

    /* BISON Declarations */
    %token NUM
    %left '-' '+'
    %left '*' '/'
    %left NEG /* negation--unary minus */
    %right '^' /* exponentiation */

    /* Grammar follows */
    %%
    input: /* empty string */
    | input line
    ;

    line: '\n'
    | exp '\n' { printf ("\t%.10g\n", $1); }
    ;

    exp: NUM { $$ = $1; }
    | exp '+' exp { $$ = $1 + $3; }
    | exp '-' exp { $$ = $1 - $3; }
    | exp '*' exp { $$ = $1 * $3; }
    | exp '/' exp { $$ = $1 / $3; }
    | '-' exp %prec NEG { $$ = -$2; }
    | exp '^' exp { $$ = pow ($1, $3); }
    | '(' exp ')' { $$ = $2; }
    ;
    %%
    #include <ctype.h>
    main ()
    {
    yyparse ();
    }
    yyerror (s) /* Called by yyparse on error */
    char *s;
    {
    printf ("%s\n", s);
    }

    yylex ()
    {
    int c;

    /* skip white space */
    while ((c = getchar ()) == ' ' || c == '\t')
    ;
    /* process numbers */
    if (c == '.' || isdigit (c))
    {
    ungetc (c, stdin);
    scanf ("%lf", &yylval);
    return NUM;
    }
    /* return end-of-file */
    if (c == EOF)
    return 0;
    /* return single chars */
    return c;
    }
Generate and compile:

bison parse.y
    gcc parse.tab.c -lm
    # ./a.out
    3+2
    5
Below are the commonly used declarations.

%token

%token goes in the definitions section:
    %token NUMBER
When the C file is generated, it turns into:
    #define NUMBER 258 
So %token can be understood as shorthand that saves you writing the #define yourself.

YYSTYPE

    In real parsers, the values of different symbols use different data types, e.g.,
    int and double for numeric symbols, char * for strings, and pointers to
    structures for higher level symbols. If you have multiple value types, you
    have to list all the value types used in a parser so that yacc can create a C
    union typedef called YYSTYPE to contain them. (Fortunately, yacc gives
    you a lot of help ensuring that you use the right value type for each
    symbol .)

Quoted from lex & yacc.

YYSTYPE is a type macro whose job is to define the set of value types for terminals and nonterminals.

%union is shorthand for defining YYSTYPE.

%token is shorthand for defining the token enumeration values.

%type is shorthand for declaring the value type of a nonterminal:
    %union {
    double dval;
    int vblno;
    }

    %token NUMBER
Generate a header file with the --defines option:
    # bison --defines test.y
It ends up generating the following:
    enum yytokentype{
    NUMBER = 258
    };

    union YYSTYPE{
    double dval;
    int vblno;
    };
If you give the tokens types:
    %token <vblno> NAME
    %token <dval> NUMBER
    %type <dval> expression
In action code, yacc automatically qualifies symbol value references with the appropriate field name; e.g., if the third symbol is a NUMBER, a reference to $3 acts like $3.dval.

Quoted from lex & yacc.

In the semantic-action code, if the third symbol is a NUMBER, $3 is equivalent to $3.dval.

Further reading


Recently I got a Huawei Cloud VPS and wanted to set up a simple website, so I went through registering a domain and getting an HTTPS certificate.

After fiddling for quite a while I found that the site simply could not be reached.

The real reason was that the domain had no ICP filing (备案); my certificate configuration was fine.

Troubleshooting

Narrowing it down with curl

curl -v url shows the handshake in detail:

    ./curl https://gitlab.shakudada.xyz -v
    * STATE: INIT => CONNECT handle 0x1a23898; line 1491 (connection #-5000)
    * Added connection 0. The cache now contains 1 members
    * STATE: CONNECT => WAITRESOLVE handle 0x1a23898; line 1532 (connection #0)
    * Trying 139.9.222.124:443...
    * TCP_NODELAY set
    * STATE: WAITRESOLVE => WAITCONNECT handle 0x1a23898; line 1611 (connection #0)
    * Connected to gitlab.shakudada.xyz (139.9.222.124) port 443 (#0)
    * STATE: WAITCONNECT => SENDPROTOCONNECT handle 0x1a23898; line 1667 (connection #0)
    * Marked for [keep alive]: HTTP default
    * ALPN, offering http/1.1
    * Cipher selection: ALL:!EXPORT:!EXPORT40:!EXPORT56:!aNULL:!LOW:!RC4:@STRENGTH
    * successfully set certificate verify locations:
    * CAfile: /etc/ssl/certs/ca-certificates.crt
    CApath: none
    * TLSv1.2 (OUT), TLS header, Certificate Status (22):
    * TLSv1.2 (OUT), TLS handshake, Client hello (1):
    * STATE: SENDPROTOCONNECT => PROTOCONNECT handle 0x1a23898; line 1682 (connection #0)
    * error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
    * Marked for [closure]: Failed HTTPS connection
    * multi_done
    * Closing connection 0
    * The cache now contains 0 members
    * Expire cleared (transfer 0x1a23898)
    curl: (35) error:140770FC:SSL routines:SSL23_GET_SERVER_HELLO:unknown protocol
    dinosaur@dinosaur-X550VXK:~/curl/mycurl/bin$
It fails right after the Client Hello, so let's capture the packets with tcpdump:
    tcpdump -i wlp3s0  host 139.9.222.124 and  port 443 -A -X

Here 139.9.222.124 is my un-filed IP.

Below is the capture, with the initial TCP three-way handshake removed:

    22:22:23.280367 IP 192.168.1.106.33170 > 139.9.222.124.https: Flags [P.], seq 1:518, ack 1, win 229, options [nop,nop,TS val 434651673 ecr 3365805465], length 517
    0x0000: 4500 0239 15ab 4000 4006 f77b c0a8 016a E..9..@.@..{...j
    0x0010: 8b09 de7c 8192 01bb f6ab 981d 4d9a ceb8 ...|........M...
    0x0020: 8018 00e5 22f2 0000 0101 080a 19e8 4219 ....".........B.
    0x0030: c89e 1d99 1603 0102 0001 0001 fc03 0304 ................
    0x0040: 6e2a ea14 6844 e2e1 db8c 1ee3 3582 e33f n*..hD......5..?
    0x0050: 9128 2ad2 cd1c bac2 1e70 dd4f 6587 d700 .(*......p.Oe...
    0x0060: 009e c030 c02c c028 c024 c014 c00a 00a5 ...0.,.(.$......
    0x0070: 00a3 00a1 009f 006b 006a 0069 0068 0039 .......k.j.i.h.9
    0x0080: 0038 0037 0036 0088 0087 0086 0085 c032 .8.7.6.........2
    0x0090: c02e c02a c026 c00f c005 009d 003d 0035 ...*.&.......=.5
    0x00a0: 0084 c02f c02b c027 c023 c013 c009 00a4 .../.+.'.#......
    0x00b0: 00a2 00a0 009e 0067 0040 003f 003e 0033 .......g.@.?.>.3
    0x00c0: 0032 0031 0030 009a 0099 0098 0097 0045 .2.1.0.........E
    0x00d0: 0044 0043 0042 c031 c02d c029 c025 c00e .D.C.B.1.-.).%..
    0x00e0: c004 009c 003c 002f 0096 0041 c012 c008 .....<./...A....
    0x00f0: 0016 0013 0010 000d c00d c003 000a 00ff ................
    0x0100: 0100 0135 0000 0019 0017 0000 1467 6974 ...5.........git
    0x0110: 6c61 622e 7368 616b 7564 6164 612e 7879 lab.shakudada.xy
    0x0120: 7a00 0b00 0403 0001 0200 0a00 1c00 1a00 z...............
    0x0130: 1700 1900 1c00 1b00 1800 1a00 1600 0e00 ................
    0x0140: 0d00 0b00 0c00 0900 0a00 0d00 2000 1e06 ................
    0x0150: 0106 0206 0305 0105 0205 0304 0104 0204 ................
    0x0160: 0303 0103 0203 0302 0102 0202 0300 0f00 ................
    0x0170: 0101 3374 0000 0010 000b 0009 0868 7474 ..3t.........htt
    0x0180: 702f 312e 3100 1500 b000 0000 0000 0000 p/1.1...........
    0x0190: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01a0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01b0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01c0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01d0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01e0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x01f0: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x0200: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x0210: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x0220: 0000 0000 0000 0000 0000 0000 0000 0000 ................
    0x0230: 0000 0000 0000 0000 00 .........
    22:22:23.292507 IP 139.9.222.124.https > 192.168.1.106.33170: Flags [FP.], seq 1:650, ack 518, win 8192, length 649
    0x0000: 4500 02b1 15ac 4000 f606 4102 8b09 de7c E.....@...A....|
    0x0010: c0a8 016a 01bb 8192 4d9a ceb8 f6ab 9a22 ...j....M......"
    0x0020: 5019 2000 d25b 0000 4854 5450 2f31 2e31 P....[..HTTP/1.1
    0x0030: 2034 3033 2046 6f72 6269 6464 656e 0a43 .403.Forbidden.C
    0x0040: 6f6e 7465 6e74 2d54 7970 653a 2074 6578 ontent-Type:.tex
    0x0050: 742f 6874 6d6c 3b20 6368 6172 7365 743d t/html;.charset=
    0x0060: 7574 662d 380a 5365 7276 6572 3a20 4144 utf-8.Server:.AD
    0x0070: 4d2f 322e 312e 310a 436f 6e6e 6563 7469 M/2.1.1.Connecti
    0x0080: 6f6e 3a20 636c 6f73 650a 436f 6e74 656e on:.close.Conten
    0x0090: 742d 4c65 6e67 7468 3a20 3533 300a 0a3c t-Length:.530..<
    0x00a0: 6874 6d6c 3e0a 3c68 6561 643e 0a3c 6d65 html>.<head>.<me
    0x00b0: 7461 2068 7474 702d 6571 7569 763d 2243 ta.http-equiv="C
    0x00c0: 6f6e 7465 6e74 2d54 7970 6522 2063 6f6e ontent-Type".con
    0x00d0: 7465 6e74 3d22 7465 7874 6d6c 3b63 6861 tent="textml;cha
    0x00e0: 7273 6574 3d47 4232 3331 3222 202f 3e0a rset=GB2312"./>.
    0x00f0: 2020 203c 7374 796c 653e 626f 6479 7b62 ...<style>body{b
    0x0100: 6163 6b67 726f 756e 642d 636f 6c6f 723a ackground-color:
    0x0110: 2346 4646 4646 467d 3c2f 7374 796c 653e #FFFFFF}</style>
    0x0120: 200a 3c74 6974 6c65 3ee9 9d9e e6b3 95e9 ..<title>.......
    0x0130: 98bb e696 ad32 3334 3c2f 7469 746c 653e .....234</title>
    0x0140: 0a20 203c 7363 7269 7074 206c 616e 6775 ...<script.langu
    0x0150: 6167 653d 226a 6176 6173 6372 6970 7422 age="javascript"
    0x0160: 2074 7970 653d 2274 6578 742f 6a61 7661 .type="text/java
    0x0170: 7363 7269 7074 223e 0a20 2020 2020 2020 script">........
    0x0180: 2020 7769 6e64 6f77 2e6f 6e6c 6f61 6420 ..window.onload.
    0x0190: 3d20 6675 6e63 7469 6f6e 2028 2920 7b20 =.function.().{.
    0x01a0: 0a20 2020 2020 2020 2020 2020 646f 6375 ............docu
    0x01b0: 6d65 6e74 2e67 6574 456c 656d 656e 7442 ment.getElementB
    0x01c0: 7949 6428 226d 6169 6e46 7261 6d65 2229 yId("mainFrame")
    0x01d0: 2e73 7263 3d20 2268 7474 703a 2f2f 3131 .src=."http://11
    0x01e0: 342e 3131 352e 3139 322e 3234 363a 3930 4.115.192.246:90
    0x01f0: 3830 2f65 7272 6f72 2e68 746d 6c22 3b0a 80/error.html";.
    0x0200: 2020 2020 2020 2020 2020 2020 7d0a 3c2f ............}.</
    0x0210: 7363 7269 7074 3e20 2020 0a3c 2f68 6561 script>....</hea
    0x0220: 643e 0a20 203c 626f 6479 3e0a 2020 2020 d>...<body>.....
    0x0230: 3c69 6672 616d 6520 7374 796c 653d 2277 <iframe.style="w
    0x0240: 6964 7468 3a31 3030 253b 2068 6569 6768 idth:100%;.heigh
    0x0250: 743a 3130 3025 3b22 2069 643d 226d 6169 t:100%;".id="mai
    0x0260: 6e46 7261 6d65 2220 7372 633d 2222 2066 nFrame".src="".f
    0x0270: 7261 6d65 626f 7264 6572 3d22 3022 2073 rameborder="0".s
    0x0280: 6372 6f6c 6c69 6e67 3d22 6e6f 223e 3c2f crolling="no"></
    0x0290: 6966 7261 6d65 3e0a 2020 2020 3c2f 626f iframe>.....</bo
    0x02a0: 6479 3e0a 2020 2020 2020 3c2f 6874 6d6c dy>.......</html
    0x02b0: 3e >
    22:22:23.292552 IP 192.168.1.106.33170 > 139.9.222.124.https: Flags [.], ack 651, win 239, options [nop,nop,TS val 434651685 ecr 3365805465], length 0
    0x0000: 4500 0034 15ac 4000 4006 f97f c0a8 016a E..4..@.@......j
    0x0010: 8b09 de7c 8192 01bb f6ab 9a22 4d9a d142 ...|......."M..B
    0x0020: 8010 00ef d4f7 0000 0101 080a 19e8 4225 ..............B%
    0x0030: c89e 1d99 ....
    22:22:23.292562 IP 139.9.222.124.https > 192.168.1.106.33170: Flags [.], ack 518, win 235, options [nop,nop,TS val 3365805485 ecr 434651673], length 0
    0x0000: 4500 0034 1ff1 4000 3106 fe3a 8b09 de7c E..4..@.1..:...|
    0x0010: c0a8 016a 01bb 8192 4d9a ceb8 f6ab 9a22 ...j....M......"
    0x0020: 8010 00eb d77d 0000 0101 080a c89e 1dad .....}..........
    0x0030: 19e8 4219
The two leading bytes 4500 are clearly the start of the IP header: 4 means IPv4, and 5 is the header length in 32-bit words, i.e. 5*4 = 20 bytes.

IP header

That is:
    0x0000:  4500 0239 15ab 4000 4006 f77b c0a8 016a  E..9..@.@..{...j
    0x0010: 8b09 de7c
Everything up to de7c is the IP header.

TCP header
    0x0000:  4500 0239 15ab 4000 4006 f77b c0a8 016a  E..9..@.@..{...j
0x0010: 8b09 de7c 8192 01bb <- 01bb is 443, i.e. the destination port
0x01bb = 1*16*16 + 11*16 + 11 = 443

Version: the IP protocol version — currently 4, with 6 as the next generation.

Header length: the length of the IP header, i.e. the fixed part (20 bytes) plus the variable part, expressed in 32-bit words in a 4-bit field. The maximum is 1111, i.e. 15, so the header can be at most 15*4 = 60 bytes; minus the fixed 20 bytes, the options can take at most 40 bytes.

Flipping through RFC 8446, the TLS 1.2 Client Hello version magic number is 0x0303, and searching the dump indeed finds it:

    struct {
    ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */
    Random random;
    opaque legacy_session_id<0..32>;
    CipherSuite cipher_suites<2..2^16-2>;
    opaque legacy_compression_methods<1..2^8-1>;
    Extension extensions<8..2^16-1>;
    } ClientHello;
But what comes back is plaintext — an HTTP 403 page — clearly not a proper TLS reply.

So the connection was blocked based on the SNI in the Client Hello.
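One way to double-check that the blocking keys off the SNI (host and IP are the ones from this post) is to compare a handshake that sends the server name with one that does not:

# sends SNI gitlab.shakudada.xyz in the Client Hello
openssl s_client -connect 139.9.222.124:443 -servername gitlab.shakudada.xyz
# same endpoint; without -servername, older OpenSSL sends no SNI at all
openssl s_client -connect 139.9.222.124:443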

    In the OCSPStatusRequest, the "ResponderIDs" provides a list of OCSP
    responders that the client trusts. A zero-length "responder_id_list"
    sequence has the special meaning that the responders are implicitly
    known to the server - e.g., by prior arrangement. "Extensions" is a
    DER encoding of OCSP request extensions.

    Both "ResponderID" and "Extensions" are DER-encoded ASN.1 types as
    defined in [OCSP]. "Extensions" is imported from [PKIX]. A zero-
    length "request_extensions" value means that there are no extensions
    (as opposed to a zero-length ASN.1 SEQUENCE, which is not valid for
    the "Extensions" type).

    In the case of the "id-pkix-ocsp-nonce" OCSP extension, [OCSP] is
    unclear about its encoding; for clarification, the nonce MUST be a
    DER-encoded OCTET STRING, which is encapsulated as another OCTET
    STRING (note that implementations based on an existing OCSP client
    will need to be checked for conformance to this requirement).

    Servers that receive a client hello containing the "status_request"
    extension, MAY return a suitable certificate status response to the
    client along with their certificate. If OCSP is requested, they
    SHOULD use the information contained in the extension when selecting
    an OCSP responder, and SHOULD include request_extensions in the OCSP
    request.

This post records the limits on MySQL's various string types.

Preface

Today something peculiar happened: I converted a PDF document to HTML and stored it in MySQL, using a TEXT column.
When I read it back, a chunk was missing. After some searching I found that TEXT holds at most 65,535 bytes — only about 16K characters with a 4-byte charset such as utf8mb4.

Bytes vs. characters

If you have written PHP, you know the difference between strlen("你好") and mb_strlen("你好").
In Java, the byte streams InputStream/OutputStream versus the character streams Writer/Reader should be just as familiar.

String length vs. type in MySQL

String Type Storage Requirements


    In the following table, M represents the declared column length in characters for nonbinary string types and bytes for binary string types. L represents the actual length in bytes of a given string value.

Data Type                    | Storage Required
CHAR(M)                      | M × w bytes, 0 <= M <= 255, where w is the bytes needed for the largest character in the character set (the compact InnoDB row formats optimize storage for variable-length character sets)
BINARY(M)                    | M bytes, 0 <= M <= 255
VARCHAR(M), VARBINARY(M)     | L + 1 bytes if column values require 0-255 bytes, L + 2 bytes if values may require more than 255 bytes
TINYBLOB, TINYTEXT           | L + 1 bytes, where L < 2^8
BLOB, TEXT                   | L + 2 bytes, where L < 2^16
MEDIUMBLOB, MEDIUMTEXT       | L + 3 bytes, where L < 2^24
LONGBLOB, LONGTEXT           | L + 4 bytes, where L < 2^32
ENUM('value1','value2',...)  | 1 or 2 bytes, depending on the number of enumeration values (65,535 values maximum)
SET('value1','value2',...)   | 1, 2, 3, 4, or 8 bytes, depending on the number of set members (64 members maximum)

Source

CHAR

CHAR holds at most 255 characters.

Creating a 256-character CHAR column with the following SQL reports an error:


    ERROR 1074 (42000): Column length too big for column ‘name’ (max = 255); use BLOB or TEXT instead

    CREATE TABLE `test123` ( `name` char(256)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;
    (gdb) bt
    #0 my_error (nr=1074, MyFlags=0) at /home/dinosaur/Downloads/mysql-5.7.21/mysys/my_error.c:194
    #1 0x0000000000f93e75 in Create_field::init (this=0x7fb9b8006740, thd=0x7fb9b8000b70, fld_name=0x7fb9b8006730 "name", fld_type=MYSQL_TYPE_STRING, fld_length=0x7fb9b8006738 "256", fld_decimals=0x0, fld_type_modifier=0,
    fld_default_value=0x0, fld_on_update_value=0x0, fld_comment=0x7fb9b8002fe0, fld_change=0x0, fld_interval_list=0x7fb9b8003150, fld_charset=0x0, fld_geom_type=0, fld_gcol_info=0x0)
    at /home/dinosaur/Downloads/mysql-5.7.21/sql/field.cc:10962
    #2 0x000000000163ae21 in add_field_to_list (thd=0x7fb9b8000b70, field_name=0x7fba3d30c460, type=MYSQL_TYPE_STRING, length=0x7fb9b8006738 "256", decimals=0x0, type_modifier=0, default_value=0x0, on_update_value=0x0,
    comment=0x7fb9b8002fe0, change=0x0, interval_list=0x7fb9b8003150, cs=0x0, uint_geom_type=0, gcol_info=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5798
    #3 0x000000000178e3f6 in MYSQLparse (YYTHD=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_yacc.yy:6337
    #4 0x000000000163d75a in parse_sql (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550, creation_ctx=0x0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:7131
    #5 0x0000000001639f07 in mysql_parse (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5469
    #6 0x000000000162f0a3 in dispatch_command (thd=0x7fb9b8000b70, com_data=0x7fba3d30de00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
    #7 0x000000000162df32 in do_command (thd=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
    #8 0x0000000001770f97 in handle_connection (arg=0x570d510) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
    #9 0x0000000001de0b41 in pfs_spawn_thread (arg=0x5749fc0) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
    #10 0x00007fba478aa6ba in start_thread (arg=0x7fba3d30e700) at pthread_create.c:333
    #11 0x00007fba46d3341d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109
Maximum VARCHAR length

Similarly to CHAR, try to create a VARCHAR column of 65,533 characters:
    CREATE TABLE `test123` ( `name` varchar(65533)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;

The result is the same kind of error:

    ERROR 1074 (42000): Column length too big for column ‘name’ (max = 16383); use BLOB or TEXT instead

    (gdb) bt
    #0 my_error (nr=1074, MyFlags=0) at /home/dinosaur/Downloads/mysql-5.7.21/mysys/my_error.c:194
    #1 0x00000000016c9998 in prepare_blob_field (thd=0x7fb9b8000b70, sql_field=0x7fb9b8006840) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:4715
    #2 0x00000000016c6a33 in mysql_prepare_create_table (thd=0x7fb9b8000b70, error_schema_name=0x7fb9b8006728 "test", error_table_name=0x7fb9b8006168 "test123", create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600,
    tmp_table=false, db_options=0x7fba3d30b080, file=0x7fb9b8006ac0, key_info_buffer=0x7fba3d30c170, key_count=0x7fba3d30c16c, select_field_count=0) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:3721
    #3 0x00000000016cac22 in create_table_impl (thd=0x7fb9b8000b70, db=0x7fb9b8006728 "test", table_name=0x7fb9b8006168 "test123", error_table_name=0x7fb9b8006168 "test123", path=0x7fba3d30c180 "./test/test123",
    create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600, internal_tmp_table=false, select_field_count=0, no_ha_table=false, is_trans=0x7fba3d30c3da, key_info=0x7fba3d30c170, key_count=0x7fba3d30c16c)
    at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5131
    #4 0x00000000016cb884 in mysql_create_table_no_lock (thd=0x7fb9b8000b70, db=0x7fb9b8006728 "test", table_name=0x7fb9b8006168 "test123", create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600, select_field_count=0,
    is_trans=0x7fba3d30c3da) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5417
    #5 0x00000000016cb9a2 in mysql_create_table (thd=0x7fb9b8000b70, create_table=0x7fb9b80061a0, create_info=0x7fba3d30c6b0, alter_info=0x7fba3d30c600) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_table.cc:5463
    #6 0x00000000016335be in mysql_execute_command (thd=0x7fb9b8000b70, first_level=true) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:3248
    #7 0x000000000163a31c in mysql_parse (thd=0x7fb9b8000b70, parser_state=0x7fba3d30d550) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:5582
    #8 0x000000000162f0a3 in dispatch_command (thd=0x7fb9b8000b70, com_data=0x7fba3d30de00, command=COM_QUERY) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:1458
    #9 0x000000000162df32 in do_command (thd=0x7fb9b8000b70) at /home/dinosaur/Downloads/mysql-5.7.21/sql/sql_parse.cc:999
    #10 0x0000000001770f97 in handle_connection (arg=0x570d510) at /home/dinosaur/Downloads/mysql-5.7.21/sql/conn_handler/connection_handler_per_thread.cc:300
    #11 0x0000000001de0b41 in pfs_spawn_thread (arg=0x5749fc0) at /home/dinosaur/Downloads/mysql-5.7.21/storage/perfschema/pfs.cc:2190
    #12 0x00007fba478aa6ba in start_thread (arg=0x7fba3d30e700) at pthread_create.c:333
    #13 0x00007fba46d3341d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109

    (gdb) p sql_field->length
    $2 = 262132
    static bool prepare_blob_field(THD *thd, Create_field *sql_field)
    {
    DBUG_ENTER("prepare_blob_field");

    if (sql_field->length > MAX_FIELD_VARCHARLENGTH && // sql_field->length = 262132
    !(sql_field->flags & BLOB_FLAG))
    {
    /* Convert long VARCHAR columns to TEXT or BLOB */
    char warn_buff[MYSQL_ERRMSG_SIZE];

if (sql_field->def || thd->is_strict_mode()) // in strict mode this reports ERROR 1074 (42000): Column length too big for
{ // column 'name' (max = 16383); use BLOB or TEXT instead
    my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name,
    static_cast<ulong>(MAX_FIELD_VARCHARLENGTH / // MAX_FIELD_VARCHARLENGTH = 65535
    sql_field->charset->mbmaxlen)); // sql_field->charset->mbmaxlen = 4
    DBUG_RETURN(1);
    }
    ...
    }
In other words, in strict mode a VARCHAR is limited to 65,535 bytes. Let's change it to varchar(16383) and see:
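The 16383 figure follows directly from the byte cap and the charset (a quick back-of-the-envelope check):

-- utf8mb4 needs up to 4 bytes per character, and VARCHAR here is capped at 65,535 bytes:
--   16383 characters x 4 bytes = 65,532 bytes   (fits under 65,535)
--   16384 characters x 4 bytes = 65,536 bytes   (one byte over, hence the error above)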

    mysql> CREATE TABLE `test123` ( `name` varchar(16383)) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4;
    Query OK, 0 rows affected (0.26 sec)

OK — no problem.


Compiling ES
    gradle idea
It ran for a long time:

    BUILD SUCCESSFUL in 49m 34s
    334 actionable tasks: 334 executed

The ES call stack:
    prepareRequest:61, RestCatAction (org.elasticsearch.rest.action.cat)
    handleRequest:80, BaseRestHandler (org.elasticsearch.rest)
    handleRequest:69, SecurityRestFilter (org.elasticsearch.xpack.security.rest)
    dispatchRequest:240, RestController (org.elasticsearch.rest)
    tryAllHandlers:337, RestController (org.elasticsearch.rest)
    dispatchRequest:174, RestController (org.elasticsearch.rest)
    dispatchRequest:324, AbstractHttpServerTransport (org.elasticsearch.http)
    handleIncomingRequest:374, AbstractHttpServerTransport (org.elasticsearch.http)
    incomingRequest:303, AbstractHttpServerTransport (org.elasticsearch.http)
    channelRead0:66, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
    channelRead0:31, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
    channelRead:105, SimpleChannelInboundHandler (io.netty.channel)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:58, Netty4HttpPipeliningHandler (org.elasticsearch.http.netty4)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    channelRead:111, MessageToMessageCodec (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:323, ByteToMessageDecoder (io.netty.handler.codec)
    channelRead:297, ByteToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:286, IdleStateHandler (io.netty.handler.timeout)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:1434, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:965, DefaultChannelPipeline (io.netty.channel)
    read:163, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
    processSelectedKey:644, NioEventLoop (io.netty.channel.nio)
    processSelectedKeysPlain:544, NioEventLoop (io.netty.channel.nio)
    processSelectedKeys:498, NioEventLoop (io.netty.channel.nio)
    run:458, NioEventLoop (io.netty.channel.nio)
    run:897, SingleThreadEventExecutor$5 (io.netty.util.concurrent)
    run:834, Thread (java.lang)

And also:

    handleRequest:97, BaseRestHandler (org.elasticsearch.rest)
    handleRequest:69, SecurityRestFilter (org.elasticsearch.xpack.security.rest)
    dispatchRequest:240, RestController (org.elasticsearch.rest)
    tryAllHandlers:337, RestController (org.elasticsearch.rest)
    dispatchRequest:174, RestController (org.elasticsearch.rest)
    dispatchRequest:324, AbstractHttpServerTransport (org.elasticsearch.http)
    handleIncomingRequest:374, AbstractHttpServerTransport (org.elasticsearch.http)
    incomingRequest:303, AbstractHttpServerTransport (org.elasticsearch.http)
    channelRead0:66, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
    channelRead0:31, Netty4HttpRequestHandler (org.elasticsearch.http.netty4)
    channelRead:105, SimpleChannelInboundHandler (io.netty.channel)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:58, Netty4HttpPipeliningHandler (org.elasticsearch.http.netty4)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    channelRead:111, MessageToMessageCodec (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:102, MessageToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:323, ByteToMessageDecoder (io.netty.handler.codec)
    channelRead:297, ByteToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:286, IdleStateHandler (io.netty.handler.timeout)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:340, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:1434, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeChannelRead:362, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:348, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:965, DefaultChannelPipeline (io.netty.channel)
    read:163, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
    processSelectedKey:644, NioEventLoop (io.netty.channel.nio)
    processSelectedKeysPlain:544, NioEventLoop (io.netty.channel.nio)
    processSelectedKeys:498, NioEventLoop (io.netty.channel.nio)
    run:458, NioEventLoop (io.netty.channel.nio)
    run:897, SingleThreadEventExecutor$5 (io.netty.util.concurrent)
    run:834, Thread (java.lang)

A brief introduction to inverted indexes

What problem does an inverted index solve?

Suppose we have a document a.txt that contains the text hello world, i am dinosaur.

We need to determine whether the word world appears in this document. How should we go about it?

When there are only a few documents, we can (a tiny sketch follows the list):

• 1 Open the file
• 2 Read the file contents from the beginning and check whether it contains world
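Here is a tiny Go sketch of that brute-force approach (my own illustration, not code from the original post): it simply reads each file in full and scans it for the word.

package main

import (
    "fmt"
    "os"
    "strings"
)

func main() {
    // Brute force: read every file completely and look for the word.
    for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
        data, err := os.ReadFile(name)
        if err != nil {
            fmt.Println(name, "error:", err)
            continue
        }
        fmt.Println(name, "contains \"world\":", strings.Contains(string(data), "world"))
    }
}

This is fine for a handful of small files, but every query rescans every byte, which is exactly the cost the inverted index avoids.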

Now suppose we don't just have the single document a.txt but also b.txt and c.txt. How do we decide whether some word appears in these documents, and if it does, on which lines of which documents?


If we stick with the previous approach of reading every file from the beginning, that is fine when there are only a few documents, but with many documents it takes far too long to scan them all.


One of the problems an inverted index solves is exactly this: how to quickly determine whether a word appears in these documents at all, and if so, in which documents.


A related example

Baseline inverted index

An inverted index consists of two main parts:

• The first part (the dictionary entry for a term) contains two fields:
  • the document frequency: how many documents contain the term
  • a pointer to the term's postings list
• The second part is that list (the postings list); each of its elements contains two fields:
  • the docID: the id of a document containing the term, which you can think of as the document's primary key
  • the term frequency: how many times the term appears in that document


(figure: structure of the inverted index — uwiAvq.png)

I wrote a small demo myself (see the GitHub link); the output looks like this:

    keeper  3|[{1 1} {4 1} {5 1}]
    In 1|[{2 1}]
    house 2|[{2 1} {3 1}]
    nignt 2|[{4 1} {5 1}]
    did 1|[{4 1}]
    dark 1|[{6 1}]
    old 4|[{1 1} {2 1} {3 1} {4 1}]
    night 3|[{1 1} {5 1} {6 1}]
    had 1|[{3 1}]
    sleeps 1|[{6 1}]
    keep 3|[{1 1} {3 1} {5 1}]
    big 2|[{2 1} {3 1}]
    keeps 3|[{1 1} {5 1} {6 1}]
    the 6|[{1 1} {2 1} {3 1} {4 1} {5 1} {6 1}]
    never 1|[{4 1}]
    and 1|[{6 1}]
    And 1|[{6 1}]
    in 5|[{1 1} {2 1} {3 1} {5 1} {6 1}]
    The 3|[{1 1} {3 1} {5 1}]
    sleep 1|[{4 1}]
    Where 1|[{4 1}]
    town 2|[{1 1} {3 1}]
    gown 1|[{2 1}]

Steps to build an inverted index

• 1 Read the documents
• 2 Tokenize them
• 3 Normalize the tokens
• 4 Build the inverted index with term frequencies and offsets (a small sketch follows below)
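Below is a minimal, self-contained Go sketch of those four steps (my own toy code, not the demo linked above; the two example sentences are made up). It lowercases tokens as a stand-in for normalization, records only term frequencies (no offsets), and prints entries in a df|[{docID tf} ...] shape similar to the demo output.

package main

import (
    "fmt"
    "strings"
)

// posting records one document that contains a term.
type posting struct {
    DocID int // which document
    TF    int // how many times the term appears in that document
}

func main() {
    docs := []string{
        "The old night keeper keeps the keep in the town",
        "In the big old house in the big old gown",
    }

    index := map[string][]posting{}
    for docID, doc := range docs {
        tf := map[string]int{}
        // 1/2: read the document and tokenize it on whitespace.
        for _, tok := range strings.Fields(doc) {
            // 3: normalize — here we just lowercase the token.
            tf[strings.ToLower(tok)]++
        }
        // 4: append one posting per term for this document.
        for term, n := range tf {
            index[term] = append(index[term], posting{DocID: docID + 1, TF: n})
        }
    }

    // Print each dictionary entry as: term  df|postings
    for term, postings := range index {
        fmt.Printf("%s %d|%v\n", term, len(postings), postings)
    }
}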

Tokenization

    https://www.cnblogs.com/forfuture1978/archive/2010/06/06/1752837.html


This is Lucene's stack trace; the main logic lives in the invert method.

    incrementToken:48, FilteringTokenFilter (org.apache.lucene.analysis)
    invert:812, DefaultIndexingChain$PerField (org.apache.lucene.index)
    processField:442, DefaultIndexingChain (org.apache.lucene.index)
    processDocument:406, DefaultIndexingChain (org.apache.lucene.index)
    updateDocument:250, DocumentsWriterPerThread (org.apache.lucene.index)
    updateDocument:495, DocumentsWriter (org.apache.lucene.index)
    updateDocument:1594, IndexWriter (org.apache.lucene.index)
    addDocument:1213, IndexWriter (org.apache.lucene.index)
    indexDoc:198, IndexFiles (com.dinosaur)
    visitFile:155, IndexFiles$1 (com.dinosaur)
    visitFile:151, IndexFiles$1 (com.dinosaur)
    walkFileTree:2670, Files (java.nio.file)
    walkFileTree:2742, Files (java.nio.file)
    indexDocs:151, IndexFiles (com.dinosaur)
    main:113, IndexFiles (com.dinosaur)

The core of Lucene's tokenization is incrementToken, which produces the next token.

For example, Lucene's standard tokenizer:

private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);  // a final singleton attribute
@Override
public final boolean incrementToken() throws IOException {
    ...
    scanner.getText(termAtt); // the scanner returns the next token and writes it into termAtt
    ...
}

Maven packaging and NoClassDefFoundError

I was just learning how to use Maven: the project compiled fine, but running java -jar xxx.jar threw NoClassDefFoundError.

Falling into the pit

The first step was finding an answer on Stack Overflow suggesting the maven-shade-plugin, which is in fact the correct answer.

This is the correct configuration:

<project>
  ...
  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.2.1</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
  </build>
  ...
</project>

Related link

So where did I go wrong?

At the time I didn't understand the plugins element nested under the <pluginManagement> element.

• This is the wrong way to write it:

<project>
  ...
  <build>
    <pluginManagement>
      <plugins>
        <plugin>
          <groupId>org.apache.maven.plugins</groupId>
          <artifactId>maven-shade-plugin</artifactId>
          <version>3.2.1</version>
          <executions>
            <execution>
              <phase>package</phase>
              <goals>
                <goal>shade</goal>
              </goals>
            </execution>
          </executions>
        </plugin>
      </plugins>
    </pluginManagement>
  </build>
  ...
</project>

What I ended up with

In the end the plugin configuration has to sit in the <plugins> element directly under build; it must not go inside the <plugins> element under pluginManagement.

<project>
  ...
  <build>
    <plugins>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-shade-plugin</artifactId>
        <version>3.2.1</version>
        <executions>
          <execution>
            <phase>package</phase>
            <goals>
              <goal>shade</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
    <pluginManagement>
      ...
    </pluginManagement>
  </build>
  ...
</project>

Then running mvn package bundles all the dependencies into the jar.


    hello

    java hello world

public class HelloWorld {

    public static void main(String[] args) {
        // Prints "Hello, World" to the terminal window.
        System.out.println("Hello, World");
    }

}



Compiling

To compile, add the -g option so that debug info is included:

    javac -g HelloWorld.java 

Debugging

Method 1:

Use jdb to debug hello world:

    jdb -classpath . HelloWorld
    > stop  in HelloWorld.main                                
    Deferring breakpoint HelloWorld.main.
    It will be set after the class is loaded.
    > run
    run HelloWorld
    Set uncaught java.lang.Throwable
    Set deferred uncaught java.lang.Throwable
    >
    VM Started: Set deferred breakpoint HelloWorld.main

    Breakpoint hit: "thread=main", HelloWorld.main(), line=5 bci=0
    5 System.out.println("Hello, World");

    main[1]


Writing hello world with Maven

If, after mvn package, java -jar some.jar complains that it cannot find main, the following answer may help:
https://stackoverflow.com/a/9689877/6229548


Loading a class

    (gdb) bt
    #0 open64 () at ../sysdeps/unix/syscall-template.S:84
    #1 0x00007ffff695b544 in os::open (path=0x7ffff7fcefd0 "/home/dinosaur/jdk8/build/linux-x86_64-normal-server-slowdebug/jdk/classes/java/lang/Class.class", oflag=0, mode=0)
    at /home/dinosaur/jdk8/hotspot/src/os/linux/vm/os_linux.cpp:5188
    #2 0x00007ffff63ffdfc in ClassPathDirEntry::open_stream (this=0x7ffff006f178, name=0x7ffff000cce8 "java/lang/Class.class", __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:210
    #3 0x00007ffff640055b in LazyClassPathEntry::open_stream (this=0x7ffff001ad48, name=0x7ffff000cce8 "java/lang/Class.class", __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:330
    #4 0x00007ffff640209b in ClassLoader::load_classfile (h_name=0x7ffff4062108, __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:909
    #5 0x00007ffff6a8570a in SystemDictionary::load_instance_class (class_name=0x7ffff4062108, class_loader=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1304
    #6 0x00007ffff6a838b8 in SystemDictionary::resolve_instance_class_or_null (name=0x7ffff4062108, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:779
    #7 0x00007ffff6a81ff7 in SystemDictionary::resolve_or_null (class_name=0x7ffff4062108, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:232
    #8 0x00007ffff6a819f2 in SystemDictionary::resolve_or_fail (class_name=0x7ffff4062108, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:171
    #9 0x00007ffff6a81d64 in SystemDictionary::resolve_or_fail (class_name=0x7ffff4062108, throw_error=true, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:212
    #10 0x00007ffff6a87277 in SystemDictionary::initialize_wk_klass (id=SystemDictionary::Class_klass_knum, init_opt=0, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1866
    #11 0x00007ffff6a873a7 in SystemDictionary::initialize_wk_klasses_until (limit_id=SystemDictionary::Cloneable_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Object_klass_knum,
    __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1882
    #12 0x00007ffff6a8b13c in SystemDictionary::initialize_wk_klasses_through (end_id=SystemDictionary::Class_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Object_klass_knum,
    __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp:408
    #13 0x00007ffff6a874e0 in SystemDictionary::initialize_preloaded_classes (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1901
    #14 0x00007ffff6a87199 in SystemDictionary::initialize (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1843
    #15 0x00007ffff6ad68c9 in Universe::genesis (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:288
    #16 0x00007ffff6ad8db6 in universe2_init () at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:991
    #17 0x00007ffff66463b3 in init_globals () at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/init.cpp:114
    #18 0x00007ffff6ab93ef in Threads::create_vm (args=0x7ffff7fd0e80, canTryAgain=0x7ffff7fd0e03) at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/thread.cpp:3424
    #19 0x00007ffff6702ed0 in JNI_CreateJavaVM (vm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, args=0x7ffff7fd0e80) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jni.cpp:5166
    #20 0x00007ffff7bc3bda in InitializeJVM (pvm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, ifn=0x7ffff7fd0f30) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:1145
    #21 0x00007ffff7bc1a36 in JavaMain (_args=0x7fffffffa910) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:371
    #22 0x00007ffff73d66ba in start_thread (arg=0x7ffff7fd1700) at pthread_create.c:333
    #23 0x00007ffff78f741d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109


Loading the classloader

    (gdb) bt
    #0 open64 () at ../sysdeps/unix/syscall-template.S:84
    #1 0x00007ffff695b544 in os::open (path=0x7ffff7fcefd0 "/home/dinosaur/jdk8/build/linux-x86_64-normal-server-slowdebug/jdk/classes/java/lang/ClassLoader.class", oflag=0, mode=0)
    at /home/dinosaur/jdk8/hotspot/src/os/linux/vm/os_linux.cpp:5188
    #2 0x00007ffff63ffdfc in ClassPathDirEntry::open_stream (this=0x7ffff006f178, name=0x7ffff000cd08 "java/lang/ClassLoader.class", __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:210
    #3 0x00007ffff640055b in LazyClassPathEntry::open_stream (this=0x7ffff001ad48, name=0x7ffff000cd08 "java/lang/ClassLoader.class", __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:330
    #4 0x00007ffff640209b in ClassLoader::load_classfile (h_name=0x7ffff40621c8, __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/classLoader.cpp:909
    #5 0x00007ffff6a8570a in SystemDictionary::load_instance_class (class_name=0x7ffff40621c8, class_loader=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1304
    #6 0x00007ffff6a838b8 in SystemDictionary::resolve_instance_class_or_null (name=0x7ffff40621c8, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:779
    #7 0x00007ffff6a81ff7 in SystemDictionary::resolve_or_null (class_name=0x7ffff40621c8, class_loader=..., protection_domain=..., __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:232
    #8 0x00007ffff6a819f2 in SystemDictionary::resolve_or_fail (class_name=0x7ffff40621c8, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:171
    #9 0x00007ffff6a81d64 in SystemDictionary::resolve_or_fail (class_name=0x7ffff40621c8, throw_error=true, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:212
    #10 0x00007ffff6a87277 in SystemDictionary::initialize_wk_klass (id=SystemDictionary::ClassLoader_klass_knum, init_opt=0, __the_thread__=0x7ffff000c000)
    at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1866
    #11 0x00007ffff6a873a7 in SystemDictionary::initialize_wk_klasses_until (limit_id=SystemDictionary::SoftReference_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Cloneable_klass_knum,
    __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1882
    #12 0x00007ffff6a8b13c in SystemDictionary::initialize_wk_klasses_through (end_id=SystemDictionary::Reference_klass_knum, start_id=@0x7ffff7fd0a84: SystemDictionary::Cloneable_klass_knum,
    __the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.hpp:408
    #13 0x00007ffff6a87553 in SystemDictionary::initialize_preloaded_classes (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1918
    #14 0x00007ffff6a87199 in SystemDictionary::initialize (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/classfile/systemDictionary.cpp:1843
    #15 0x00007ffff6ad68c9 in Universe::genesis (__the_thread__=0x7ffff000c000) at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:288
    #16 0x00007ffff6ad8db6 in universe2_init () at /home/dinosaur/jdk8/hotspot/src/share/vm/memory/universe.cpp:991
    #17 0x00007ffff66463b3 in init_globals () at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/init.cpp:114
    #18 0x00007ffff6ab93ef in Threads::create_vm (args=0x7ffff7fd0e80, canTryAgain=0x7ffff7fd0e03) at /home/dinosaur/jdk8/hotspot/src/share/vm/runtime/thread.cpp:3424
    #19 0x00007ffff6702ed0 in JNI_CreateJavaVM (vm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, args=0x7ffff7fd0e80) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jni.cpp:5166
    #20 0x00007ffff7bc3bda in InitializeJVM (pvm=0x7ffff7fd0ed8, penv=0x7ffff7fd0ee0, ifn=0x7ffff7fd0f30) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:1145
    #21 0x00007ffff7bc1a36 in JavaMain (_args=0x7fffffffa910) at /home/dinosaur/jdk8/jdk/src/share/bin/java.c:371
    #22 0x00007ffff73d66ba in start_thread (arg=0x7ffff7fd1700) at pthread_create.c:333
    #23 0x00007ffff78f741d in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:109


Printing hello world

This is the stack when Hello, World is printed; presumably some frames have been JIT-compiled/optimized, which is why gdb cannot show the complete stack.

    (gdb) bt
    #0 write () at ../sysdeps/unix/syscall-template.S:84
    #1 0x00007ffff556779a in handleWrite (fd=1, buf=0x7ffff7fce270, len=12)
    at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/io_util_md.c:164
    #2 0x00007ffff556710a in writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0398, bytes=0x7ffff7fd0390, off=0, len=12, append=0 '\000',
    fid=0x47e1043) at /home/dinosaur/jdk8/jdk/src/share/native/java/io/io_util.c:189
    #3 0x00007ffff555a79c in Java_java_io_FileOutputStream_writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0398, bytes=0x7ffff7fd0390,
    off=0, len=12, append=0 '\000') at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/FileOutputStream_md.c:70
    #4 0x00007fffe10298dc in ?? ()
    #5 0x0000000000000008 in ?? ()
    #6 0x0000000000000008 in ?? ()
    #7 0x00007ffff000c000 in ?? ()
    #8 0x00007fffe02c74d8 in ?? ()
    #9 0x00007fffe1028ee3 in ?? ()
    #10 0x00007ffff7fd0318 in ?? ()
    #11 0x00007fffe0173f60 in ?? ()
    #12 0x00007ffff7fd0398 in ?? ()
    #13 0x00007fffe0175120 in ?? ()
    #14 0x0000000000000000 in ?? ()
    (gdb) c
    Continuing.
    Hello, World

    (gdb) bt
    #0 write () at ../sysdeps/unix/syscall-template.S:84
    #1 0x00007ffff556779a in handleWrite (fd=1, buf=0x7ffff7fce2d0, len=1)
    at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/io_util_md.c:164
    #2 0x00007ffff556710a in writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0400, bytes=0x7ffff7fd03f8, off=0, len=1, append=0 '\000',
    fid=0x47e1043) at /home/dinosaur/jdk8/jdk/src/share/native/java/io/io_util.c:189
    #3 0x00007ffff555a79c in Java_java_io_FileOutputStream_writeBytes (env=0x7ffff000c210, this=0x7ffff7fd0400, bytes=0x7ffff7fd03f8,
    off=0, len=1, append=0 '\000') at /home/dinosaur/jdk8/jdk/src/solaris/native/java/io/FileOutputStream_md.c:70
    #4 0x00007fffe10298dc in ?? ()
    #5 0x00007ffff7fd0410 in ?? ()
    #6 0x00007ffff672dd43 in JVM_ArrayCopy (env=0x7ffff000c210, ignored=0x7ffff7fd0400, src=0x7ffff7fd03f8, src_pos=0,
    dst=0x7f00f6265bea, dst_pos=1, length=0) at /home/dinosaur/jdk8/hotspot/src/share/vm/prims/jvm.cpp:298
    #7 0x00007fffe1007500 in ?? ()
    #8 0x0000000000000000 in ?? ()


    java class file

    4.1 The ClassFile Structure


    A class file consists of a stream of 8-bit bytes. All 16-bit, 32-bit, and 64-bit
    quantities are constructed by reading in two, four, and eight consecutive 8-bit
    bytes, respectively. Multibyte data items are always stored in big-endian order,
    where the high bytes come first. In the Java SE platform, this format is supported
    by interfaces java.io.DataInput and java.io.DataOutput and classes such as
    java.io.DataInputStream and java.io.DataOutputStream.


From the JVM spec, then, we know that a class file stores the magic number 0xCAFEBABE and that multibyte values are stored big-endian.


    4.1 The ClassFile Structure
    A class file consists of a single ClassFile structure:

ClassFile {
    u4             magic;
    u2             minor_version;
    u2             major_version;
    u2             constant_pool_count;
    cp_info        constant_pool[constant_pool_count-1];
    u2             access_flags;
    u2             this_class;
    u2             super_class;
    u2             interfaces_count;
    u2             interfaces[interfaces_count];
    u2             fields_count;
    field_info     fields[fields_count];
    u2             methods_count;
    method_info    methods[methods_count];
    u2             attributes_count;
    attribute_info attributes[attributes_count];
}

    The magic item supplies the magic number identifying the class file format;
    it has the value 0xCAFEBABE.

    dinosaur@dinosaur-X550VXK:~/jdk8/build$ hexdump  HelloWorld.class -C

Use hexdump to inspect the class file:


    00000000 ca fe ba be 00 00 00 34 00 1d 0a 00 06 00 0f 09 |.......4........|
    00000010 00 10 00 11 08 00 12 0a 00 13 00 14 07 00 15 07 |................|
    00000020 00 16 01 00 06 3c 69 6e 69 74 3e 01 00 03 28 29 |.....<init>...()|
    00000030 56 01 00 04 43 6f 64 65 01 00 0f 4c 69 6e 65 4e |V...Code...LineN|
    00000040 75 6d 62 65 72 54 61 62 6c 65 01 00 04 6d 61 69 |umberTable...mai|
    00000050 6e 01 00 16 28 5b 4c 6a 61 76 61 2f 6c 61 6e 67 |n...([Ljava/lang|
    00000060 2f 53 74 72 69 6e 67 3b 29 56 01 00 0a 53 6f 75 |/String;)V...Sou|
    00000070 72 63 65 46 69 6c 65 01 00 0f 48 65 6c 6c 6f 57 |rceFile...HelloW|
    00000080 6f 72 6c 64 2e 6a 61 76 61 0c 00 07 00 08 07 00 |orld.java.......|
    00000090 17 0c 00 18 00 19 01 00 0c 48 65 6c 6c 6f 2c 20 |.........Hello, |
    000000a0 57 6f 72 6c 64 07 00 1a 0c 00 1b 00 1c 01 00 0a |World...........|
    000000b0 48 65 6c 6c 6f 57 6f 72 6c 64 01 00 10 6a 61 76 |HelloWorld...jav|
    000000c0 61 2f 6c 61 6e 67 2f 4f 62 6a 65 63 74 01 00 10 |a/lang/Object...|
    000000d0 6a 61 76 61 2f 6c 61 6e 67 2f 53 79 73 74 65 6d |java/lang/System|
    000000e0 01 00 03 6f 75 74 01 00 15 4c 6a 61 76 61 2f 69 |...out...Ljava/i|
    000000f0 6f 2f 50 72 69 6e 74 53 74 72 65 61 6d 3b 01 00 |o/PrintStream;..|
    00000100 13 6a 61 76 61 2f 69 6f 2f 50 72 69 6e 74 53 74 |.java/io/PrintSt|
    00000110 72 65 61 6d 01 00 07 70 72 69 6e 74 6c 6e 01 00 |ream...println..|
    00000120 15 28 4c 6a 61 76 61 2f 6c 61 6e 67 2f 53 74 72 |.(Ljava/lang/Str|
    00000130 69 6e 67 3b 29 56 00 21 00 05 00 06 00 00 00 00 |ing;)V.!........|
    00000140 00 02 00 01 00 07 00 08 00 01 00 09 00 00 00 1d |................|
    00000150 00 01 00 01 00 00 00 05 2a b7 00 01 b1 00 00 00 |........*.......|
    00000160 01 00 0a 00 00 00 06 00 01 00 00 00 01 00 09 00 |................|
    00000170 0b 00 0c 00 01 00 09 00 00 00 25 00 02 00 01 00 |..........%.....|
    00000180 00 00 09 b2 00 02 12 03 b6 00 04 b1 00 00 00 01 |................|
    00000190 00 0a 00 00 00 0a 00 02 00 00 00 05 00 08 00 06 |................|
    000001a0 00 01 00 0d 00 00 00 02 00 0e |..........|


Let's look at the various parts of this hello world class file.

The first item is the magic number: the four bytes ca fe ba be.
Then comes minor_version: 00 00,
and major_version: 00 34 (0x34 = 52, i.e. Java 8).
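As a small illustration of the big-endian layout described above (a sketch of my own, not from the post), here is a Go program that reads just the magic number and version fields with encoding/binary:

package main

import (
    "encoding/binary"
    "fmt"
    "os"
)

// header mirrors the first fields of the ClassFile structure.
type header struct {
    Magic        uint32 // expected to be 0xCAFEBABE
    MinorVersion uint16
    MajorVersion uint16
}

func main() {
    f, err := os.Open("HelloWorld.class")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    var h header
    // Class files store multibyte values big-endian, so decode accordingly.
    if err := binary.Read(f, binary.BigEndian, &h); err != nil {
        panic(err)
    }
    // For the file dumped above this prints: magic=0xCAFEBABE minor=0 major=52
    fmt.Printf("magic=0x%X minor=%d major=%d\n", h.Magic, h.MinorVersion, h.MajorVersion)
}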


Further reading


Environment variables

When we run the shell command env, we see a lot of strings; those are the environment variables of that process.

How environment variables are stored


    The first two arguments are just the same. The third argument envp gives the program’s environment; it is the same as the value of environ. See Environment Variables. POSIX.1 does not allow this three-argument form, so to be portable it is best to write main to take two arguments, and use the value of environ.


The related POSIX documentation:


    where argc is the argument count and argv is an array of character pointers to the arguments themselves. In addition, the following variable:

    extern char **environ;

    is initialized as a pointer to an array of character pointers to the environment strings. The argv and environ arrays are each terminated by a null pointer. The null pointer terminating the argv array is not counted in argc.


Example

Here is an example:

#include <stdio.h>

extern char **environ;

int main(int argc, const char *argv[]) {
    printf("environment variables:\n");
    int i = 0;
    while (environ[i]) {
        printf("%p\t%s\n", environ[i], environ[i]);
        i++;
    }

    printf("argv:\n");
    for (int i = 0; i < argc; i++) {
        printf("%p\t%s\n", argv[i], argv[i]);
    }
}

After compiling and running it, this is what gets printed:

    gcc main.c -o main
    dinosaur@dinosaur-X550VXK:~/test$ ./main
    environment variables:
    0x7ffc250920c7 XDG_VTNR=7
    0x7ffc250920d2 LC_PAPER=zh_CN.UTF-8
    0x7ffc250920e7 LC_ADDRESS=zh_CN.UTF-8
    0x7ffc250920fe XDG_SESSION_ID=c1
    0x7ffc25092110 XDG_GREETER_DATA_DIR=/var/lib/lightdm-data/dinosaur
    0x7ffc25092144 LC_MONETARY=zh_CN.UTF-8
    0x7ffc2509215c CLUTTER_IM_MODULE=xim
    ...

The glibc variable

Definition

It is defined in glibc-master/posix/environ.c:

/* This file just defines the `__environ' variable (and alias `environ').  */

#include <unistd.h>
#include <stddef.h>

/* This must be initialized; we cannot have a weak alias into bss. */
char **__environ = NULL;
weak_alias (__environ, environ) // a weak alias: environ is really just __environ
...

Reading: getenv

char *
getenv (const char *name)
{
  size_t len = strlen (name);
  char **ep;
  uint16_t name_start;

  ...
  name_start = *(const uint16_t *) name;
  ...
  len -= 2;
  name += 2;

  for (ep = __environ; *ep != NULL; ++ep)
    {
      uint16_t ep_start = *(uint16_t *) *ep;

      if (name_start == ep_start && !strncmp (*ep + 2, name, len)
          && (*ep)[len + 2] == '=')
        return &(*ep)[len + 3];
    }
  ...

  return NULL;
}

Writing: putenv and setenv

Both putenv and setenv call __add_to_environ:

int
__add_to_environ (const char *name, const char *value, const char *combined,
                  int replace)
{
  const size_t namelen = strlen (name);
  size_t vallen;
  ...
  vallen = strlen (value) + 1;
  ...
  const size_t varlen = namelen + 1 + vallen;
  ...
  memcpy (new_value, name, namelen);
  new_value[namelen] = '=';
  memcpy (&new_value[namelen + 1], value, vallen);
  ...
}

So really the char **environ variable just stores a set of key=value strings.
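The same key=value shape is visible from Go as well; this is a small sketch of my own, not part of the original post:

package main

import (
    "fmt"
    "os"
)

func main() {
    // os.Environ returns the environment as a slice of "key=value" strings,
    // which matches the layout that char **environ holds in libc.
    for _, kv := range os.Environ() {
        fmt.Println(kv)
    }
}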


How and when it is inherited

// todo: dig into this when I have time


Summary

Environment variables are just a pile of strings, and they are inherited through the parent-child relationship between processes.
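A quick way to observe that inheritance in practice (my own sketch; the exact mechanism is still the todo above) is to spawn a child process and let it print an extra variable we pass along:

package main

import (
    "os"
    "os/exec"
)

func main() {
    cmd := exec.Command("env")
    // Start from our own environment and add one extra variable;
    // the child prints it, showing the parent-to-child inheritance.
    cmd.Env = append(os.Environ(), "MY_EXTRA_VAR=hello")
    cmd.Stdout = os.Stdout
    if err := cmd.Run(); err != nil {
        panic(err)
    }
}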


1 The origin, principle, and application of environment variables

2 The glibc documentation


Preface

Before talking about CORS we first have to talk about the same-origin policy and cross-origin requests.

Why cross-origin requests come up

The same-origin policy restricts how a document or script loaded from one origin can interact with resources from another origin. It is an important security mechanism for isolating potentially malicious files, and it is enforced by the browser itself.


    rfc 6454


    User agents interact with content created by a large number of
    authors. Although many of those authors are well-meaning, some
    authors might be malicious. To the extent that user agents undertake
    actions based on content they process, user agent implementors might
    wish to restrict the ability of malicious authors to disrupt the
    confidentiality or integrity of other content or servers.


More detailed related material


    cors

Related reading

Related reading 2

Simple requests

A simple request is one that does not trigger a CORS preflight request.


Some requests do not trigger a CORS preflight. That article calls such requests "simple requests"; note that the term is not part of the Fetch specification (which defines CORS). A request counts as a "simple request" if it satisfies all of the following conditions:


It uses one of the following methods:
  GET
  HEAD
  POST
The only headers set manually are those in the set the Fetch spec defines as CORS-safelisted; no header outside this set may be set by hand. The set is:
  Accept
  Accept-Language
  Content-Language
  Content-Type (note the additional restrictions below)
  DPR
  Downlink
  Save-Data
  Viewport-Width
  Width
The value of Content-Type is limited to one of the following three:
  text/plain
  multipart/form-data
  application/x-www-form-urlencoded
No event listener is registered on any XMLHttpRequestUpload object used in the request; XMLHttpRequestUpload objects are accessed via the XMLHttpRequest.upload property.
No ReadableStream object is used in the request.
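To make the preflight distinction concrete, here is a minimal Go net/http sketch (my own illustration, not from the post; the allowed origin is a made-up example). Non-simple requests first send an OPTIONS preflight, which this wrapper answers; simple requests go straight to the handler and only need the response headers.

package main

import "net/http"

// withCORS wraps a handler and adds CORS headers for one allowed origin.
func withCORS(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Access-Control-Allow-Origin", "https://example.com") // assumed origin
        w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT")
        w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-Custom-Header")
        if r.Method == http.MethodOptions {
            // Preflight request: reply with the headers above and no body.
            w.WriteHeader(http.StatusNoContent)
            return
        }
        next.ServeHTTP(w, r)
    })
}

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    })
    http.ListenAndServe(":8080", withCORS(mux))
}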


Source

    Interface values are comparable. Two interface values are equal if they have identical dynamic types and equal dynamic values or if both have value nil.
    A value x of non-interface type X and a value t of interface type T are comparable when values of type X are comparable and X implements T. They are equal if t's dynamic type is identical to X and t's dynamic value is equal to x.

    A comparison of two interface values with identical dynamic types causes a run-time panic if values of that type are not comparable. This behavior applies not only to direct interface value comparisons but also when comparing arrays of interface values or structs with interface-valued fields.


Interface values are comparable. Two interface values are equal when either of the following holds:

• their dynamic types are identical and their dynamic values are equal
• both interface values are nil

When one value is an interface and the other is not, they are comparable only under this condition:

• x is a value of a non-interface type X and t is a value of an interface type T; x and t are comparable only when values of type X are comparable and X implements T

And when one is an interface and the other is not, and they are comparable, when are they equal?

• when t's dynamic type is identical to X and t's dynamic value is equal to x

When two interface values with identical dynamic types are compared and that type is not comparable, a run-time panic occurs. This applies not only to direct interface comparisons, but also to comparing arrays of interface values or structs that have interface-valued fields.
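A small runnable sketch of these rules (my own example): the first two comparisons are fine because the dynamic type int is comparable, while the last one panics because the dynamic type []int is not.

package main

import "fmt"

func main() {
    var a, b interface{} = 1, 1
    fmt.Println(a == b) // true: identical dynamic type (int) and equal dynamic values

    var x interface{} = 3
    fmt.Println(x == 3) // true: the non-interface value 3 has comparable type int, which implements interface{}

    var s1, s2 interface{} = []int{1}, []int{1}
    // Both dynamic types are []int, which is not comparable,
    // so this comparison panics at run time.
    defer func() { fmt.Println("recovered:", recover()) }()
    fmt.Println(s1 == s2)
}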


Related analysis


Recently I've been learning a bit of Go and have run into plenty of pitfalls.

What is mDNS

Related reading

A go-micro build failure I hit (as of 2019/9/19)

The Go version I was using at the time was 1.10, and the build failed because the crypto/ed25519 package could not be found.

• How to fix it?

Upgrade to Go 1.13 or later; it seems that package only entered the standard library in 1.13.

Related notes

    // In Go 1.13, the ed25519 package was promoted to the standard library as
    // crypto/ed25519, and this package became a wrapper for the standard library one.
    //
    // +build !go1.13

The full call flow and a packet capture

// todo


Compiling and debugging the Linux kernel

Related references

Step 1

    $ cd linux-4.16
    $ make menuconfig
    $ make -j8

• Enable debug info:

    Kernel hacking  ---> 
    [*] Kernel debugging
    Compile-time checks and compiler options --->
    [*] Compile the kernel with debug info
    [*] Provide GDB scripts for kernel debugging

• Disable address randomization
  Select Processor type and features, then disable it:

  [*] 64-bit kernel                                                │ │  
  │ │ General setup ---> │ │
  │ │ [*] Enable loadable module support ---> │ │
  │ │ -*- Enable the block layer ---> │ │
  │ │ Processor type and features --->

Deselect KASLR, i.e. turn off kernel address space layout randomization:

    │ │    [ ]   Randomize the address of the kernel image (KASLR)   

Why address randomization needs to be turned off

Step 2

Build busybox (statically linked, note), then build an initramfs root filesystem.

Step 3

Run the system with qemu:

    qemu-system-x86_64  -kernel  /home/dinosaur/Downloads/linux-4.16/arch/x86/boot/bzImage  -hda qemu_rootfs.img  -append "root=/dev/sda rootfstype=ext4 rw"   -gdb tcp::1234

Debugging with gdb

    gdb vmlinux
    (gdb) target remote localhost:1234
    b vfs_write

The output is then:

    (gdb) bt
    #0 vfs_write (file=0xffff880006431700, buf=0x66506a <error: Cannot access memory at address 0x66506a>,
    count=46, pos=0xffffc900000b7f08) at fs/read_write.c:529
    #1 0xffffffff811a08cd in SYSC_write (count=<optimized out>, buf=<optimized out>, fd=<optimized out>)
    at fs/read_write.c:589
    #2 SyS_write (fd=<optimized out>, buf=6705258, count=46) at fs/read_write.c:581
    #3 0xffffffff81001c8b in do_syscall_64 (regs=0xffff880006431700) at arch/x86/entry/common.c:287
    #4 0xffffffff81a00071 in entry_SYSCALL_64 () at arch/x86/entry/entry_64.S:237
    #5 0x0000000000000000 in ?? ()


tcpdump decoding redis's RESP protocol

Recently, while capturing packets with tcpdump, I noticed that it actually supports decoding the RESP protocol.

    tcpdump -i lo port 6379

When I type set a a in redis-cli, tcpdump shows:

    seq 1023993873:1023993900, ack 4077734227, win 342, options [nop,nop,TS val 2912390058 ecr 2912384753], length 27: RESP "set" "a" "a"

• How is that implemented?

After pulling the source from GitHub and building it, I found that resp_print is what parses the RESP protocol, and that the port is hard-coded: when the port is 6379, tcpdump tries to decode the traffic as the redis protocol.

void
tcp_print(netdissect_options *ndo,
          const u_char *bp, u_int length,
          const u_char *bp2, int fragmented)
{
    ...
    if (ndo->ndo_packettype) {
        switch (ndo->ndo_packettype) {
        case PT_ZMTP1:
            zmtp1_print(ndo, bp, length);
            break;
        case PT_RESP: // decode type forced with -T
            resp_print(ndo, bp, length);
            break;
        }
        return;
    }
    ...
    else if (IS_SRC_OR_DST_PORT(REDIS_PORT)) // REDIS_PORT = 6379
        resp_print(ndo, bp, length);
    ...
}
    (gdb) bt
    #0 resp_print (ndo=0x7fffffffca40, bp=0x7ffff6e82088 "*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n", length=27) at ./print-resp.c:214
    #1 0x000000000045ae3f in tcp_print (ndo=0x7fffffffca40, bp=0x7ffff6e82088 "*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n", length=27, bp2=0x7ffff6e82054 "E", fragmented=0) at ./print-tcp.c:723
    #2 0x0000000000420f52 in ip_print_demux (ndo=0x7fffffffca40, bp=0x7ffff6e82068 "\242D\030\353=\b\350\021\363\rIS\200\030\001V\376C", length=59, ver=4, fragmented=0, ttl_hl=64, nh=6 '\006',
    iph=0x7ffff6e82054 "E") at ./print-ip-demux.c:100
    #3 0x0000000000420b1b in ip_print (ndo=0x7fffffffca40, bp=0x7ffff6e82054 "E", length=79) at ./print-ip.c:493
    #4 0x000000000041bf64 in ethertype_print (ndo=0x7fffffffca40, ether_type=2048, p=0x7ffff6e82054 "E", length=79, caplen=79, src=0x7fffffffc680, dst=0x7fffffffc690) at ./print-ether.c:490
    #5 0x000000000041bb03 in ether_print_common (ndo=0x7fffffffca40, p=0x7ffff6e82054 "E", length=79, caplen=79, print_switch_tag=0x0, switch_tag_len=0, print_encap_header=0x0, encap_header_arg=0x0)
    at ./print-ether.c:345
    #6 0x000000000041bc44 in ether_print (ndo=0x7fffffffca40, p=0x7ffff6e82046 "", length=93, caplen=93, print_encap_header=0x0, encap_header_arg=0x0) at ./print-ether.c:401
    #7 0x000000000041bc94 in ether_if_print (ndo=0x7fffffffca40, h=0x7fffffffc7e0, p=0x7ffff6e82046 "") at ./print-ether.c:416
    #8 0x00000000004078fe in pretty_print_packet (ndo=0x7fffffffca40, h=0x7fffffffc7e0, sp=0x7ffff6e82046 "", packets_captured=1) at ./print.c:414
    #9 0x0000000000406d84 in print_packet (user=0x7fffffffca40 "", h=0x7fffffffc7e0, sp=0x7ffff6e82046 "") at ./tcpdump.c:2984
    #10 0x00007ffff7755ef6 in ?? () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
    #11 0x00007ffff775a4a3 in ?? () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
    #12 0x00007ffff775f1fd in pcap_loop () from /usr/lib/x86_64-linux-gnu/libpcap.so.0.8
    #13 0x00000000004060b1 in main (argc=5, argv=0x7fffffffddd8) at ./tcpdump.c:2438


Decoding RESP on other ports

If your redis-server is not on 6379, just add -T RESP and tcpdump will decode the RESP protocol on other ports as well:

    sudo tcpdump -i lo port 7777 -T RESP
    tcpdump: verbose output suppressed, use -v or -vv for full protocol decode
    listening on lo, link-type EN10MB (Ethernet), capture size 262144 bytes
    13:07:31.737440 IP localhost.42466 > localhost.7777: Flags [P.], seq 2285148837:2285148864, ack 2982472773, win 342, options [nop,nop,TS val 2913788220 ecr 2913780890], length 27: RESP "set" "a" "a"
    13:07:31.737680 IP localhost.7777 > localhost.42466: Flags [P.], seq 1:6, ack 27, win 342, options [nop,nop,TS val 2913788220 ecr 2913788220], length 5: RESP "OK"
    13:07:31.737706 IP localhost.42466 > localhost.7777: Flags [.], ack 6, win 342, options [nop,nop,TS val 2913788220 ecr 2913788220], length 0
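For reference, the payload tcpdump is decoding above is just the RESP array *3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n (visible in the gdb backtrace earlier). Here is a rough Go sketch of my own, much simplified compared to tcpdump's resp_print, that turns such a bulk-string array back into the command words:

package main

import (
    "bufio"
    "fmt"
    "strconv"
    "strings"
)

// parseRESPArray reads one RESP array of bulk strings, e.g. "*3\r\n$3\r\nset\r\n...".
// It ignores the $<len> lines and assumes payloads contain no embedded newlines.
func parseRESPArray(r *bufio.Reader) ([]string, error) {
    header, err := r.ReadString('\n') // e.g. "*3\r\n"
    if err != nil {
        return nil, err
    }
    n, err := strconv.Atoi(strings.TrimSpace(header[1:]))
    if err != nil {
        return nil, err
    }
    args := make([]string, 0, n)
    for i := 0; i < n; i++ {
        if _, err := r.ReadString('\n'); err != nil { // "$<len>\r\n" length line
            return nil, err
        }
        data, err := r.ReadString('\n') // "<payload>\r\n"
        if err != nil {
            return nil, err
        }
        args = append(args, strings.TrimSpace(data))
    }
    return args, nil
}

func main() {
    payload := "*3\r\n$3\r\nset\r\n$1\r\na\r\n$1\r\na\r\n"
    args, err := parseRESPArray(bufio.NewReader(strings.NewReader(payload)))
    if err != nil {
        panic(err)
    }
    fmt.Printf("RESP %q\n", args) // RESP ["set" "a" "a"]
}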

Migrated my earlier posts over to hexo.


A big PHP pitfall: implicit conversion

PHP is a weakly typed language, and one of its pitfalls is implicit conversion.

When implicit conversion is triggered

The only case I know of is comparison, where the corresponding implicit conversion kicks in.

Example

Here is an example.

The PHP code:

<?php
var_dump('1abc' == 1);
// returns true

After _is_numeric_string_ex does its conversion, 1abc has been turned into 1:

    (gdb) p *lval
    $4 = 1

The stack is as follows:

    (gdb) bt
    #0 _is_numeric_string_ex (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1, oflow_info=0x0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:3001
    #1 0x0000000000938c52 in is_numeric_string_ex (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1, oflow_info=0x0)
    at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:142
    #2 0x0000000000938c94 in is_numeric_string (str=0x7fffef602b58 "1abc", length=4, lval=0x7fffffff99a0, dval=0x7fffffff99a0, allow_errors=1)
    at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.h:146
    #3 0x000000000094502b in compare_function (result=0x7fffffff9b78, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:2069
    #4 0x0000000000945a32 in is_equal_function (result=0x7fffffff9b78, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_operators.c:2159
    #5 0x00000000009274e3 in zend_try_ct_eval_binary_op (result=0x7fffffff9b78, opcode=17, op1=0x7fffffff9aa8, op2=0x7fffffff9ac8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:6880
    #6 0x0000000000927a0d in zend_compile_binary_op (result=0x7fffffff9b70, ast=0x7fffef686090) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:6999
    #7 0x000000000092b8d1 in zend_compile_expr (result=0x7fffffff9b70, ast=0x7fffef686090) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8235
    #8 0x000000000091b84f in zend_compile_args (ast=0x7fffef6860a8, fbc=0x167f050) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:3202
    #9 0x000000000091baaf in zend_compile_call_common (result=0x7fffffff9d20, args_ast=0x7fffef6860a8, fbc=0x167f050) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:3282
    #10 0x000000000091e44b in zend_compile_call (result=0x7fffffff9d20, ast=0x7fffef6860d8, type=0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:4009
    #11 0x000000000092bc3e in zend_compile_var (result=0x7fffffff9d20, ast=0x7fffef6860d8, type=0) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8339
    #12 0x000000000092b841 in zend_compile_expr (result=0x7fffffff9d20, ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8217
    #13 0x000000000092b513 in zend_compile_stmt (ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8186
    #14 0x000000000092b0de in zend_compile_top_stmt (ast=0x7fffef6860d8) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8072
    #15 0x000000000092b0c0 in zend_compile_top_stmt (ast=0x7fffef686018) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend_compile.c:8067
    #16 0x00000000008ec355 in zend_compile (type=2) at Zend/zend_language_scanner.l:601
    #17 0x00000000008ec4e6 in compile_file (file_handle=0x7fffffffca10, type=8) at Zend/zend_language_scanner.l:635
    #18 0x00000000007296f0 in phar_compile_file (file_handle=0x7fffffffca10, type=8) at /home/dinosaur/Downloads/php-7.2.2/ext/phar/phar.c:3320
    #19 0x00007fffeeeca612 in opcache_compile_file (file_handle=0x7fffffffca10, type=8, key=0x7fffef16dd4c <accel_globals+556> "test.php:240416:240464", op_array_p=0x7fffffffa318)
    at /home/dinosaur/Downloads/php-7.2.2/ext/opcache/ZendAccelerator.c:1600
    #20 0x00007fffeeecb722 in persistent_compile_file (file_handle=0x7fffffffca10, type=8) at /home/dinosaur/Downloads/php-7.2.2/ext/opcache/ZendAccelerator.c:1941
    #21 0x000000000094ccb4 in zend_execute_scripts (type=8, retval=0x0, file_count=3) at /home/dinosaur/Downloads/php-7.2.2/Zend/zend.c:1490
    #22 0x00000000008b0b4a in php_execute_script (primary_file=0x7fffffffca10) at /home/dinosaur/Downloads/php-7.2.2/main/main.c:2590
    #23 0x0000000000a3fd23 in do_cli (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1011
    #24 0x0000000000a40ee0 in main (argc=2, argv=0x1441f40) at /home/dinosaur/Downloads/php-7.2.2/sapi/cli/php_cli.c:1404



So what is the rule here?
1 If one operand is a string and the other is a number, the string is converted to an int; if the conversion finds that it is not a number, it becomes 0.
And then the two end up equal.

The rules

Operand 1                  | Operand 2                  | Rule
string, resource or number | string, resource or number | Convert the strings and resources to numbers, then compare them as ordinary numbers

Related reading


golang gc closing fds

Back in April or May I wrote a Go program. To guarantee that at most one instance exists, the process acquires a lock at startup, and any process that fails to get the file lock exits. I start the process once a minute; the whole point is to keep the process alive.

I used a file lock precisely for this property:

• if the file is closed, the lock is released as well

The problem I ran into

• The problem: after half a day, ps aux showed seven or eight of these processes running, when by design there should have been exactly one.
  The code looked roughly like this:

func lockFile() error {
    name := "lockfiletest.lock"
    file, err := os.OpenFile(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666) // ① open the file
    ...
    err = syscall.FcntlFlock(file.Fd(), syscall.F_SETLK, &flockT) // ② acquire the lock
    ...
}
func main() {
    err := lockFile()
    if err != nil {
        os.Exit(2) // ③ exit if locking fails
    }
}


Very simple logic: acquire the file lock, and exit if that fails.

Hunting for the problem

• So where was the problem?

I thought about it for a long, long time: did some library I use fork a process somewhere? Did some third-party package close my file?

I kept suspecting a third-party package, but after a lot of googling I finally pinned it down to the GC.

Related link

In the example below, once compiled, the file gets reclaimed after runtime.GC() is run manually.

package main

import (
    "os"
    "log"
    "time"
    "runtime"
)

func openFile(path string) error {
    _, err := os.Open(path)
    return err
}

func main() {
    if err := openFile(os.Args[1]); err != nil {
        log.Fatal(err)
    }
    // trigger GC below will also recycle the non-referenced fd opened before
    runtime.GC()
    time.Sleep(time.Hour)
}

• How do we see which files a process has open?

  The proc filesystem is enough for that; proc exports pretty much every statistic the Linux kernel keeps.

## 8808 is the pid of my nginx master process
    ll /proc/8808/fd/
    total 0
    dr-x------ 2 root root 0 8月 10 06:16 ./
    dr-xr-xr-x 9 root root 0 8月 9 07:35 ../
    lrwx------ 1 root root 64 8月 10 06:16 0 -> /dev/null
    lrwx------ 1 root root 64 8月 10 06:16 1 -> /dev/null
    l-wx------ 1 root root 64 8月 10 06:16 2 -> /usr/local/nginx/logs/error.log*
    lrwx------ 1 root root 64 8月 10 06:16 3 -> socket:[78178946]
    l-wx------ 1 root root 64 8月 10 06:16 4 -> /usr/local/nginx/logs/access.log*
    l-wx------ 1 root root 64 8月 10 06:16 5 -> /usr/local/nginx/logs/error.log*
    lrwx------ 1 root root 64 8月 10 06:16 6 -> socket:[78180730]
    lrwx------ 1 root root 64 8月 10 06:16 7 -> socket:[78178947]

How to fix it

First: what exactly is our problem?
It is actually quite simple:

• our fd object gets garbage collected
• the GC runs the fd object's finalizer
• the finalizer closes the file descriptor behind the fd object

The solution:

Make the file a global variable; a global is always referenced, so the GC will not collect it (an alternative sketch using runtime.KeepAlive follows the code below).

var file *os.File // added: a package-level variable stays referenced, so the GC cannot reclaim it

func lockFile() error {
    name := "lockfiletest.lock"
    var err error
    // note: plain assignment rather than :=, so we really set the global instead of shadowing it
    file, err = os.OpenFile(name, syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666) // ① open the file
    ...
    err = syscall.FcntlFlock(file.Fd(), syscall.F_SETLK, &flockT) // ② acquire the lock
    ...
}
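Another way to express the same intent, sketched here as my own suggestion rather than the author's fix (the Flock_t setup is my assumption, since the original post elided it), is runtime.KeepAlive: it keeps the *os.File reachable up to that point, so the finalizer cannot close the descriptor while the program is still doing its work.

package main

import (
    "os"
    "runtime"
    "syscall"
    "time"
)

func main() {
    f, err := os.OpenFile("lockfiletest.lock", syscall.O_CREAT|syscall.O_RDWR|syscall.O_CLOEXEC, 0666)
    if err != nil {
        os.Exit(2)
    }
    flockT := syscall.Flock_t{Type: syscall.F_WRLCK, Whence: 0, Start: 0, Len: 0} // assumed lock parameters
    if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flockT); err != nil {
        os.Exit(2) // another instance already holds the lock
    }

    time.Sleep(time.Hour) // stand-in for the real work

    // Keep f reachable until here so its finalizer cannot close the fd
    // (and thereby drop the lock) while the program is still running.
    runtime.KeepAlive(f)
}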

Background

We have the table below. created_at is filled in automatically by MySQL, but when using mybatis-plus's BatchSave I got
Column 'created_at' cannot be null


CREATE TABLE `table` (
  `id` int(11) unsigned NOT NULL AUTO_INCREMENT COMMENT 'auto-increment id',
  `status` VARCHAR(100) NOT NULL COMMENT 'status',
  `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
  `updated_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'update time',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci

The SQL that mybatis-plus generates apparently includes created_at as a column as well and splices it in, roughly:

    insert into table  (status , created_at,updated_at) values (1 , null ,null) 

In other words, it does not filter out the null values.

Tracking down the cause

At first I went looking for the relevant documentation
and added the annotation below:
@TableField(insertStrategy = FieldStrategy.IGNORED)

@Data
@TableName("table")
public class TableDO {
    /**
     * auto-increment id
     */
    @TableId(value = "id", type = IdType.AUTO)
    private Integer id;

    /**
     * status: WRITE_TO_DB / RESUME_FROM_DB
     */
    private String status;

    /**
     * delayed send time
     */
    private Date delayAt;

    /**
     * creation time
     */
    @TableField(insertStrategy = FieldStrategy.IGNORED) // did not take effect
    private Date createdAt;

    /**
     * update time
     */
    private Date updatedAt;

}


It had no effect. I then kept changing @TableName, and even that annotation had no effect, so I realized that none of the mybatis-plus annotations were taking effect at all.

Investigation

Following up on that, I kept digging and found that the mapper XML auto-generated by IDEA takes higher priority and contained a resultMap. After emptying out that whole XML file, the insert worked normally, and the final SQL became:

    insert into table  (status ) values (1 ) 

Related reading


Background

Getting to know the Java container classes.

The main juc classes

The Collection interface:

Interface | Description
List      | a list
Queue     | a queue
Set       | a set

The Map interface:

Class         | Description                        | Thread-safe | Allows null?
HashMap       | a hash map                         |             |
PriorityQueue | a priority queue, implements Queue |             | No — elements must be ordered, so null throws an exception
HashSet       | an implementation of Set           |             | This class permits the null element.

AQS

AQS mainly provides three things:

• state: an int representing the resource, maintained as a volatile int
• queue: the blocking queue — a plain queue used to hold the Thread references
• block: when the resource cannot be acquired the thread blocks, using park and unpark, implemented through JNI

Blocking

park and unpark are built on pthread_cond_wait and pthread_cond_signal.

Analysis of the park source

// Parker::park decrements count if > 0, else does a condvar wait.  Unpark
// sets count to 1 and signals condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.

void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(&_counter, 0) > 0) return;

  JavaThread *jt = JavaThread::current();

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending.
  if (jt->is_interrupted(false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  struct timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    to_abstime(&absTime, time, isAbsolute, false);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Can't access interrupt state now that we are _thread_blocked. If we've
  // been interrupted since we checked above then _counter will be > 0.

  // Don't wait if cannot get lock since interference arises from
  // unparking.
  if (pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "invariant");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

  OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

  assert(_cur_index == -1, "invariant");
  if (time == 0) {
    _cur_index = REL_INDEX; // arbitrary choice when not timed
    status = pthread_cond_wait(&_cond[_cur_index], _mutex);
    assert_status(status == 0 MACOS_ONLY(|| status == ETIMEDOUT),
                  status, "cond_wait");
  }
  else {
    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
    status = pthread_cond_timedwait(&_cond[_cur_index], _mutex, &absTime);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
  }
  _cur_index = -1;

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();
}

Further reading


Background

Connecting from my local machine to the test-environment MySQL failed with the following error: Communications link failure


JDBC could not reach the test environment. In the end it turned out to be a network issue: the test environment's MySQL did not allow access from outside the internal network. After asking ops to adjust the rule, it became reachable. A quick TCP reachability check, like the sketch below, is enough to tell this case apart from a credentials problem.
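A minimal sketch of such a check (the host xxx.com comes from the JDBC URL in the stack trace below; port 3306 is the assumed default MySQL port):

import java.net.InetSocketAddress;
import java.net.Socket;

public class MysqlReachability {
    public static void main(String[] args) {
        // Try to open a plain TCP connection with a 3-second timeout.
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress("xxx.com", 3306), 3000);
            System.out.println("TCP connection OK");
        } catch (Exception e) {
            // A timeout here matches the java.net.ConnectException in the stack trace.
            System.out.println("Cannot reach MySQL: " + e);
        }
    }
}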


Stack trace

    [ERROR] 2023-08-10 15:01:16:468 [ip:] [TID: N/A] [main] [com.alibaba.druid.pool.DruidDataSource:916] [init] => init datasource error, url: jdbc:mysql://xxx.com/ods?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=UTC&allowMultiQueries=true
    com.mysql.cj.jdbc.exceptions.CommunicationsException: Communications link failure

    The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
    at com.mysql.cj.jdbc.exceptions.SQLError.createCommunicationsException(SQLError.java:174)
    at com.mysql.cj.jdbc.exceptions.SQLExceptionsMapping.translateException(SQLExceptionsMapping.java:64)
    at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:835)
    at com.mysql.cj.jdbc.ConnectionImpl.<init>(ConnectionImpl.java:455)
    at com.mysql.cj.jdbc.ConnectionImpl.getInstance(ConnectionImpl.java:240)
    at com.mysql.cj.jdbc.NonRegisteringDriver.connect(NonRegisteringDriver.java:199)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:156)
    at com.alibaba.druid.filter.stat.StatFilter.connection_connect(StatFilter.java:218)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:150)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1646)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1710)
    at com.alibaba.druid.pool.DruidDataSource.init(DruidDataSource.java:912)
    at com.baomidou.dynamic.datasource.creator.DruidDataSourceCreator.doCreateDataSource(DruidDataSourceCreator.java:83)
    at com.baomidou.dynamic.datasource.creator.AbstractDataSourceCreator.createDataSource(AbstractDataSourceCreator.java:70)
    at com.baomidou.dynamic.datasource.creator.DefaultDataSourceCreator.createDataSource(DefaultDataSourceCreator.java:48)
    at com.baomidou.dynamic.datasource.provider.AbstractDataSourceProvider.createDataSourceMap(AbstractDataSourceProvider.java:47)
    at com.baomidou.dynamic.datasource.provider.YmlDynamicDataSourceProvider.loadDataSources(YmlDynamicDataSourceProvider.java:42)
    at com.baomidou.dynamic.datasource.DynamicRoutingDataSource.afterPropertiesSet(DynamicRoutingDataSource.java:219)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1858)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1795)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:594)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:886)
    at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:790)
    at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:540)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1341)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1181)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:556)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireByType(AbstractAutowireCapableBeanFactory.java:1514)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1409)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:897)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:879)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:551)
    at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:755)
    at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747)
    at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:402)
    at org.springframework.boot.SpringApplication.run(SpringApplication.java:312)
    at org.springframework.boot.test.context.SpringBootContextLoader.loadContext(SpringBootContextLoader.java:120)
    at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContextInternal(DefaultCacheAwareContextLoaderDelegate.java:99)
    at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:124)
    at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:123)
    at org.springframework.test.context.web.ServletTestExecutionListener.setUpRequestContextIfNecessary(ServletTestExecutionListener.java:190)
    at org.springframework.test.context.web.ServletTestExecutionListener.prepareTestInstance(ServletTestExecutionListener.java:132)
    at org.springframework.test.context.TestContextManager.prepareTestInstance(TestContextManager.java:244)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.createTest(SpringJUnit4ClassRunner.java:227)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner$1.runReflectiveCall(SpringJUnit4ClassRunner.java:289)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.methodBlock(SpringJUnit4ClassRunner.java:291)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:246)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:97)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.springframework.test.context.junit4.statements.RunBeforeTestClassCallbacks.evaluate(RunBeforeTestClassCallbacks.java:61)
    at org.springframework.test.context.junit4.statements.RunAfterTestClassCallbacks.evaluate(RunAfterTestClassCallbacks.java:70)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.run(SpringJUnit4ClassRunner.java:190)
    at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
    at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
    at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38)
    at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11)
    at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35)
    at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:232)
    at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:55)
    Caused by: com.mysql.cj.exceptions.CJCommunicationsException: Communications link failure

    The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
    at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:61)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:105)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:151)
    at com.mysql.cj.exceptions.ExceptionFactory.createCommunicationsException(ExceptionFactory.java:167)
    at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:91)
    at com.mysql.cj.NativeSession.connect(NativeSession.java:152)
    at com.mysql.cj.jdbc.ConnectionImpl.connectOneTryOnly(ConnectionImpl.java:955)
    at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:825)
    ... 127 common frames omitted
    Caused by: java.net.ConnectException: Connection timed out: connect
    at java.base/java.net.PlainSocketImpl.waitForConnect(Native Method)
    at java.base/java.net.PlainSocketImpl.socketConnect(PlainSocketImpl.java:107)
    at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)
    at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)
    at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)
    at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403)
    at java.base/java.net.Socket.connect(Socket.java:591)
    at com.mysql.cj.protocol.StandardSocketFactory.connect(StandardSocketFactory.java:155)
    at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:65)
    ... 130 common frames omitted
    [ERROR] 2023-08-10 15:01:16:472 [ip:] [TID: N/A] [main] [com.alibaba.druid.pool.DruidDataSource:958] [init] => {dataSource-2} init error
    com.mysql.cj.jdbc.exceptions.CommunicationsException: Communications link failure

    The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
    at com.mysql.cj.jdbc.exceptions.SQLError.createCommunicationsException(SQLError.java:174)
    at com.mysql.cj.jdbc.exceptions.SQLExceptionsMapping.translateException(SQLExceptionsMapping.java:64)
    at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:835)
    at com.mysql.cj.jdbc.ConnectionImpl.<init>(ConnectionImpl.java:455)
    at com.mysql.cj.jdbc.ConnectionImpl.getInstance(ConnectionImpl.java:240)
    at com.mysql.cj.jdbc.NonRegisteringDriver.connect(NonRegisteringDriver.java:199)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:156)
    at com.alibaba.druid.filter.stat.StatFilter.connection_connect(StatFilter.java:218)
    at com.alibaba.druid.filter.FilterChainImpl.connection_connect(FilterChainImpl.java:150)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1646)
    at com.alibaba.druid.pool.DruidAbstractDataSource.createPhysicalConnection(DruidAbstractDataSource.java:1710)
    at com.alibaba.druid.pool.DruidDataSource.init(DruidDataSource.java:912)
    at com.baomidou.dynamic.datasource.creator.DruidDataSourceCreator.doCreateDataSource(DruidDataSourceCreator.java:83)
    at com.baomidou.dynamic.datasource.creator.AbstractDataSourceCreator.createDataSource(AbstractDataSourceCreator.java:70)
    at com.baomidou.dynamic.datasource.creator.DefaultDataSourceCreator.createDataSource(DefaultDataSourceCreator.java:48)
    at com.baomidou.dynamic.datasource.provider.AbstractDataSourceProvider.createDataSourceMap(AbstractDataSourceProvider.java:47)
    at com.baomidou.dynamic.datasource.provider.YmlDynamicDataSourceProvider.loadDataSources(YmlDynamicDataSourceProvider.java:42)
    at com.baomidou.dynamic.datasource.DynamicRoutingDataSource.afterPropertiesSet(DynamicRoutingDataSource.java:219)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.invokeInitMethods(AbstractAutowireCapableBeanFactory.java:1858)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.initializeBean(AbstractAutowireCapableBeanFactory.java:1795)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:594)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.support.ConstructorResolver.resolveAutowiredArgument(ConstructorResolver.java:886)
    at org.springframework.beans.factory.support.ConstructorResolver.createArgumentArray(ConstructorResolver.java:790)
    at org.springframework.beans.factory.support.ConstructorResolver.instantiateUsingFactoryMethod(ConstructorResolver.java:540)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.instantiateUsingFactoryMethod(AbstractAutowireCapableBeanFactory.java:1341)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBeanInstance(AbstractAutowireCapableBeanFactory.java:1181)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:556)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.autowireByType(AbstractAutowireCapableBeanFactory.java:1514)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1409)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.config.DependencyDescriptor.resolveCandidate(DependencyDescriptor.java:276)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.doResolveDependency(DefaultListableBeanFactory.java:1307)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.resolveDependency(DefaultListableBeanFactory.java:1227)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.resolveFieldValue(AutowiredAnnotationBeanPostProcessor.java:657)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement.inject(AutowiredAnnotationBeanPostProcessor.java:640)
    at org.springframework.beans.factory.annotation.InjectionMetadata.inject(InjectionMetadata.java:119)
    at org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor.postProcessProperties(AutowiredAnnotationBeanPostProcessor.java:399)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.populateBean(AbstractAutowireCapableBeanFactory.java:1425)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.doCreateBean(AbstractAutowireCapableBeanFactory.java:593)
    at org.springframework.beans.factory.support.AbstractAutowireCapableBeanFactory.createBean(AbstractAutowireCapableBeanFactory.java:516)
    at org.springframework.beans.factory.support.AbstractBeanFactory.lambda$doGetBean$0(AbstractBeanFactory.java:324)
    at org.springframework.beans.factory.support.DefaultSingletonBeanRegistry.getSingleton(DefaultSingletonBeanRegistry.java:234)
    at org.springframework.beans.factory.support.AbstractBeanFactory.doGetBean(AbstractBeanFactory.java:322)
    at org.springframework.beans.factory.support.AbstractBeanFactory.getBean(AbstractBeanFactory.java:202)
    at org.springframework.beans.factory.support.DefaultListableBeanFactory.preInstantiateSingletons(DefaultListableBeanFactory.java:897)
    at org.springframework.context.support.AbstractApplicationContext.finishBeanFactoryInitialization(AbstractApplicationContext.java:879)
    at org.springframework.context.support.AbstractApplicationContext.refresh(AbstractApplicationContext.java:551)
    at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:755)
    at org.springframework.boot.SpringApplication.refresh(SpringApplication.java:747)
    at org.springframework.boot.SpringApplication.refreshContext(SpringApplication.java:402)
    at org.springframework.boot.SpringApplication.run(SpringApplication.java:312)
    at org.springframework.boot.test.context.SpringBootContextLoader.loadContext(SpringBootContextLoader.java:120)
    at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContextInternal(DefaultCacheAwareContextLoaderDelegate.java:99)
    at org.springframework.test.context.cache.DefaultCacheAwareContextLoaderDelegate.loadContext(DefaultCacheAwareContextLoaderDelegate.java:124)
    at org.springframework.test.context.support.DefaultTestContext.getApplicationContext(DefaultTestContext.java:123)
    at org.springframework.test.context.web.ServletTestExecutionListener.setUpRequestContextIfNecessary(ServletTestExecutionListener.java:190)
    at org.springframework.test.context.web.ServletTestExecutionListener.prepareTestInstance(ServletTestExecutionListener.java:132)
    at org.springframework.test.context.TestContextManager.prepareTestInstance(TestContextManager.java:244)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.createTest(SpringJUnit4ClassRunner.java:227)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner$1.runReflectiveCall(SpringJUnit4ClassRunner.java:289)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.methodBlock(SpringJUnit4ClassRunner.java:291)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:246)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.runChild(SpringJUnit4ClassRunner.java:97)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.springframework.test.context.junit4.statements.RunBeforeTestClassCallbacks.evaluate(RunBeforeTestClassCallbacks.java:61)
    at org.springframework.test.context.junit4.statements.RunAfterTestClassCallbacks.evaluate(RunAfterTestClassCallbacks.java:70)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.ParentRunner.run(ParentRunner.java:413)
    at org.springframework.test.context.junit4.SpringJUnit4ClassRunner.run(SpringJUnit4ClassRunner.java:190)
    at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
    at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:69)
    at com.intellij.rt.junit.IdeaTestRunner$Repeater$1.execute(IdeaTestRunner.java:38)
    at com.intellij.rt.execution.junit.TestsRepeater.repeat(TestsRepeater.java:11)
    at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:35)
    at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:232)
    at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:55)
    Caused by: com.mysql.cj.exceptions.CJCommunicationsException: Communications link failure

    The last packet sent successfully to the server was 0 milliseconds ago. The driver has not received any packets from the server.
    at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at java.base/jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62)
    at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:490)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:61)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:105)
    at com.mysql.cj.exceptions.ExceptionFactory.createException(ExceptionFactory.java:151)
    at com.mysql.cj.exceptions.ExceptionFactory.createCommunicationsException(ExceptionFactory.java:167)
    at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:91)
    at com.mysql.cj.NativeSession.connect(NativeSession.java:152)
    at com.mysql.cj.jdbc.ConnectionImpl.connectOneTryOnly(ConnectionImpl.java:955)
    at com.mysql.cj.jdbc.ConnectionImpl.createNewIO(ConnectionImpl.java:825)
    ... 127 common frames omitted
    Caused by: java.net.ConnectException: Connection timed out: connect
    at java.base/java.net.PlainSocketImpl.waitForConnect(Native Method)
    at java.base/java.net.PlainSocketImpl.socketConnect(PlainSocketImpl.java:107)
    at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)
    at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)
    at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)
    at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:403)
    at java.base/java.net.Socket.connect(Socket.java:591)
    at com.mysql.cj.protocol.StandardSocketFactory.connect(StandardSocketFactory.java:155)
    at com.mysql.cj.protocol.a.NativeSocketConnection.connect(NativeSocketConnection.java:65)
    ... 130 common frames omitted

Background

Getting to know Java NIO. I came across MappedByteBuffer while reading Lucene, so I wanted to understand what NIO covers.
NIO mainly consists of three parts:

• Buffer
• Selector
• Channel

    Buffer

    ByteBuffer

• MappedByteBuffer
• HeapByteBuffer
• DirectByteBuffer

Example

A simple example:

@Test
public void fileChannel() throws IOException {
    try (FileChannel fc = FileChannel.open(Paths.get("ccc.cc"),
            StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE)) {
        // Map the first byte of the file into memory, read-write.
        MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_WRITE, 0, 1);
        byte b = 97; // 'a'
        bb.put(0, b);   // absolute put: writes directly into the mapped (off-heap) region
        fc.write(bb);   // note: write() advances the buffer's position, so decode() below sees an empty buffer
        var charset = Charset.defaultCharset();
        System.out.println("res" + charset.decode(bb).toString());
    }
}

Under the hood this ends up calling:

@ForceInline
public void put$Type$(Scope scope, Object base, long offset, $type$ value) {
    try {
        put$Type$Internal(scope, base, offset, value);
    } catch (Scope.ScopedAccessError ex) {
        throw new IllegalStateException("This segment is already closed");
    }
}

@ForceInline @Scoped
private void put$Type$Internal(Scope scope, Object base, long offset, $type$ value) {
    try {
        if (scope != null) {
            scope.checkValidState();
        }
        UNSAFE.put$Type$(base, offset, value);
    } finally {
        Reference.reachabilityFence(scope);
    }
}

and finally the write happens here (in the VM):

void put(T x) {
  GuardUnsafeAccess guard(_thread);
  *addr() = normalize_for_write(x);
}

So the whole flow is: mmap returns a pointer, and MappedByteBuffer uses UNSAFE.put to modify the off-heap memory directly, without going through the Java heap.
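A small follow-up sketch (reusing the ccc.cc file from the example above) that reads the file back through an ordinary file read, to confirm the mapped write reached the file; whether other processes see the change before an explicit force() is OS-dependent, but within the same machine the data normally comes from the shared page cache:

import java.nio.file.Files;
import java.nio.file.Paths;

public class MappedReadBack {
    public static void main(String[] args) throws Exception {
        // After the mapped put(0, (byte) 97), the first byte of ccc.cc should be 'a'.
        byte[] bytes = Files.readAllBytes(Paths.get("ccc.cc"));
        System.out.println((char) bytes[0]);
    }
}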


Further reading


Java generics

What are Java generics?
Genericity is a property of a class/interface/method/constructor — loosely speaking, a kind of modifier — so when we talk about Java generics we need to cover four things: class, interface, method, and constructor.


Java generics in the JLS

Glossary:

• parameterized type (参数化类型): "A parameterized type is a class or interface type of the form C<T1,…,Tn>, where C is the name of a generic class or interface, and <T1,…,Tn> is a list of type arguments that denote a particular parameterization of the generic class or interface." — JLS §4.5; example: List<String>
• TypeIdentifier (类型标识符): "TypeIdentifier is used in the declaration of classes, interfaces, and type parameters (§8.1, §9.1, §4.4), and when referring to types (§6.5)"
• type variable (类型变量): "A type variable is an unqualified identifier used as a type in class, interface, method, and constructor bodies. A type variable is introduced by the declaration of a type parameter of a generic class, interface, method, or constructor (§8.1.2, §9.1.2, §8.4.4, §8.8.4)" — JLS §4.4

Type parameters and type variables

The relationship between the two: a type parameter is made up of several parts, and among those parts it can contain type variables.


Type parameter

A type parameter can be built from type variables.


Type variable

A type variable, together with some other identifiers, can make up a type parameter.


What is a type variable:


    A type variable is an unqualified identifier used as a type in class, interface, method,
    and constructor bodies.


That is, a type variable is an identifier used as a type in class/interface/method/constructor bodies.


So the conclusion: a type variable is an identifier.

What a type variable is for

    A type variable is introduced by the declaration of a type parameter of a generic
    class, interface, method, or constructor

Scope of a type parameter (type parameter scope)

It splits into two cases: scope for classes/interfaces is one case, and scope for constructors/methods is the other.


For classes:


The scope of a class’s type parameter (§8.1.2) is the type parameter section of
the class declaration, and the type parameter section of any superclass type or
superinterface type of the class declaration, and the class body. If the class is a
record class (§8.10), then the scope of the type parameter additionally includes the
header of the record declaration (§8.10.1).


Generic classes, interfaces, methods, and constructors

Generics-related declarations come in four kinds:

• generic class
• generic interface
• generic method
• generic constructor

Generic class:


    A class is generic if the class declaration declares one or more type variables


Generic method:

A method is generic if it declares one or more type variables

Generic interface:


    An interface is generic if the interface declaration declares one or more type
    variables


Generic constructor:


    A constructor is generic if it declares one or more type variables


Type arguments (TypeArguments)

Type identifier (TypeIdentifier)

Differences

Generic interfaces behave much like generic classes, and generic constructors much like generic methods, so it is enough to understand the difference between a generic class and a generic method; a small sketch follows.
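A minimal illustration (the names Box, printBoth are mine) of the two declaration sites — the class introduces a type variable visible throughout the class body, while the method introduces one scoped to that method only:

// Generic class: T is declared on the class and visible in the whole class body.
class Box<T> {
    private T value;

    void set(T value) { this.value = value; }
    T get() { return value; }

    // Generic method: U is declared on the method and visible only inside it,
    // independent of the class's own type parameter T.
    <U> void printBoth(U other) {
        System.out.println(value + " / " + other);
    }
}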


Implementation

For the implementation in Java, we can look directly at the javac source.

/**
 * {@literal
 * TypeParametersOpt = ["<" TypeParameter {"," TypeParameter} ">"]
 * }
 */
protected List<JCTypeParameter> typeParametersOpt() {
    return typeParametersOpt(false);
}

/**
 * {@literal
 * TypeParameter = [Annotations] TypeVariable [TypeParameterBound]
 * TypeParameterBound = EXTENDS Type {"&" Type}
 * TypeVariable = Ident
 * }
 */
JCTypeParameter typeParameter() {
    int pos = token.pos;
    List<JCAnnotation> annos = typeAnnotationsOpt();
    Name name = typeName();
    ListBuffer<JCExpression> bounds = new ListBuffer<>();
    if (token.kind == EXTENDS) {
        nextToken();
        bounds.append(parseType());
        while (token.kind == AMP) {
            nextToken();
            bounds.append(parseType());
        }
    }
    return toP(F.at(pos).TypeParameter(name, bounds.toList(), annos));
}
Name typeName() {
    int pos = token.pos;
    Name name = ident();
    Source source = restrictedTypeNameStartingAtSource(name, pos, true);
    if (source != null) {
        reportSyntaxError(pos, Errors.RestrictedTypeNotAllowed(name, source));
    }
    return name;
}
protected Name ident(boolean allowClass) {
    if (token.kind == IDENTIFIER) {
        Name name = token.name();
        nextToken();
        return name;
    } else if (token.kind == ASSERT) {
        log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.AssertAsIdentifier);
        nextToken();
        return names.error;
    } else if (token.kind == ENUM) {
        log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.EnumAsIdentifier);
        nextToken();
        return names.error;
    } else if (token.kind == THIS) {
        if (allowThisIdent) {
            Name name = token.name();
            nextToken();
            return name;
        } else {
            log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.ThisAsIdentifier);
            nextToken();
            return names.error;
        }
    } else if (token.kind == UNDERSCORE) {
        if (Feature.UNDERSCORE_IDENTIFIER.allowedInSource(source)) {
            log.warning(token.pos, Warnings.UnderscoreAsIdentifier);
        } else {
            log.error(DiagnosticFlag.SYNTAX, token.pos, Errors.UnderscoreAsIdentifier);
        }
        Name name = token.name();
        nextToken();
        return name;
    } else {
        accept(IDENTIFIER);
        if (allowClass && token.kind == CLASS) {
            nextToken();
            return names._class;
        }
        return names.error;
    }
}
    main[1] stop in  com.sun.tools.javac.parser.Tokens$Token:315 
    Deferring breakpoint com.sun.tools.javac.parser.Tokens$Token:315.
    It will be set after the class is loaded.
    main[1] cont
    > Set deferred breakpoint com.sun.tools.javac.parser.Tokens$Token:315

    Breakpoint hit: "thread=main", com.sun.tools.javac.parser.Tokens$Token.<init>(), line=315 bci=14
    315 this.endPos = endPos;

    main[1] list
    311
    312 Token(TokenKind kind, int pos, int endPos, List<Comment> comments) {
    313 this.kind = kind;
    314 this.pos = pos;
    315 => this.endPos = endPos;
    316 this.comments = comments;
    317 checkKind();
    318 }
    319
    320 Token[] split(Tokens tokens) {
    main[1] where
    [1] com.sun.tools.javac.parser.Tokens$Token.<init> (Tokens.java:315)
    [2] com.sun.tools.javac.parser.Tokens.<clinit> (Tokens.java:457)
    [3] com.sun.tools.javac.parser.ParserFactory.<init> (ParserFactory.java:79)
    [4] com.sun.tools.javac.parser.ParserFactory.instance (ParserFactory.java:56)
    [5] com.sun.tools.javac.main.JavaCompiler.<init> (JavaCompiler.java:386)
    [6] com.sun.tools.javac.main.JavaCompiler.instance (JavaCompiler.java:115)
    [7] com.sun.tools.javac.processing.JavacProcessingEnvironment.<init> (JavacProcessingEnvironment.java:215)
    [8] com.sun.tools.javac.processing.JavacProcessingEnvironment.instance (JavacProcessingEnvironment.java:200)
    [9] com.sun.tools.javac.api.BasicJavacTask.initPlugins (BasicJavacTask.java:217)
    [10] com.sun.tools.javac.main.Main.compile (Main.java:292)
    [11] com.sun.tools.javac.main.Main.compile (Main.java:176)
    [12] com.sun.tools.javac.Main.compile (Main.java:64)
    [13] com.sun.tools.javac.Main.main (Main.java:50)

How to debug

Terminal 1:

## compile Hello.java with javac, with the JDWP agent listening on port 8000
./java -agentlib:jdwp=transport=dt_socket,server=y,address=8000 --module jdk.compiler/com.sun.tools.javac.Main com/Hello.java

Terminal 2:

### attach jdb to the javac process
./jdb -attach 8000 -sourcepath /var/jdk/src/jdk.compiler/share/classes
### in jdb, set a breakpoint on the main method
stop in com.sun.tools.javac.Main.main
### resume execution; the command is cont (continue also works)
continue

Code with a generic method

class PARA {
    <TT> void test(TT para) {
        System.out.println(para);
    }
}

It matches this parse rule:

ClassBodyDeclaration =
    ";"
    | [STATIC] Block
    | ModifiersOpt
      ( Type Ident
        ( VariableDeclaratorsRest ";" | MethodDeclaratorRest )
      | VOID Ident VoidMethodDeclaratorRest
      | TypeParameters [Annotations]
        ( Type Ident MethodDeclaratorRest
        | VOID Ident VoidMethodDeclaratorRest
        )
      | Ident ConstructorDeclaratorRest
      | TypeParameters Ident ConstructorDeclaratorRest
      | ClassOrInterfaceOrEnumDeclaration
      )

Stack trace

    Breakpoint hit: "thread=main", com.sun.tools.javac.parser.JavacParser.ident(), line=575 bci=0
    575 if (token.kind == IDENTIFIER) {

    main[1] print token.name()
    token.name() = "TT"
    main[1] where
    [1] com.sun.tools.javac.parser.JavacParser.ident (JavacParser.java:575)
    [2] com.sun.tools.javac.parser.JavacParser.ident (JavacParser.java:571)
    [3] com.sun.tools.javac.parser.JavacParser.typeName (JavacParser.java:3,979)
    [4] com.sun.tools.javac.parser.JavacParser.typeParameter (JavacParser.java:4,563)
    [5] com.sun.tools.javac.parser.JavacParser.typeParametersOpt (JavacParser.java:4,541)
    [6] com.sun.tools.javac.parser.JavacParser.classOrInterfaceOrRecordBodyDeclaration (JavacParser.java:4,277)
    [7] com.sun.tools.javac.parser.JavacParser.classInterfaceOrRecordBody (JavacParser.java:4,214)
    [8] com.sun.tools.javac.parser.JavacParser.classDeclaration (JavacParser.java:3,925)
    [9] com.sun.tools.javac.parser.JavacParser.classOrRecordOrInterfaceOrEnumDeclaration (JavacParser.java:3,866)
    [10] com.sun.tools.javac.parser.JavacParser.typeDeclaration (JavacParser.java:3,855)
    [11] com.sun.tools.javac.parser.JavacParser.parseCompilationUnit (JavacParser.java:3,699)
    [12] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:620)
    [13] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:657)
    [14] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:1,006)
    [15] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:993)
    [16] com.sun.tools.javac.main.JavaCompiler.compile (JavaCompiler.java:919)
    [17] com.sun.tools.javac.main.Main.compile (Main.java:317)
    [18] com.sun.tools.javac.main.Main.compile (Main.java:176)
    [19] com.sun.tools.javac.Main.compile (Main.java:64)
    [20] com.sun.tools.javac.Main.main (Main.java:50)
    main[1] print kind 
    kind = "token.identifier"
    main[1] print pos
    pos = 13
    main[1] list
    311
    312 Token(TokenKind kind, int pos, int endPos, List<Comment> comments) {
    313 this.kind = kind;
    314 this.pos = pos;
    315 => this.endPos = endPos;
    316 this.comments = comments;
    317 checkKind();
    318 }
    319
    320 Token[] split(Tokens tokens) {
    main[1] where
    [1] com.sun.tools.javac.parser.Tokens$Token.<init> (Tokens.java:315)
    [2] com.sun.tools.javac.parser.Tokens$NamedToken.<init> (Tokens.java:399)
    [3] com.sun.tools.javac.parser.JavaTokenizer.readToken (JavaTokenizer.java:1,046)
    [4] com.sun.tools.javac.parser.Scanner.nextToken (Scanner.java:115)
    [5] com.sun.tools.javac.parser.JavacParser.nextToken (JavacParser.java:275)
    [6] com.sun.tools.javac.parser.JavacParser.typeParametersOpt (JavacParser.java:4,540)
    [7] com.sun.tools.javac.parser.JavacParser.classOrInterfaceOrRecordBodyDeclaration (JavacParser.java:4,277)
    [8] com.sun.tools.javac.parser.JavacParser.classInterfaceOrRecordBody (JavacParser.java:4,214)
    [9] com.sun.tools.javac.parser.JavacParser.classDeclaration (JavacParser.java:3,925)
    [10] com.sun.tools.javac.parser.JavacParser.classOrRecordOrInterfaceOrEnumDeclaration (JavacParser.java:3,866)
    [11] com.sun.tools.javac.parser.JavacParser.typeDeclaration (JavacParser.java:3,855)
    [12] com.sun.tools.javac.parser.JavacParser.parseCompilationUnit (JavacParser.java:3,699)
    [13] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:620)
    [14] com.sun.tools.javac.main.JavaCompiler.parse (JavaCompiler.java:657)
    [15] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:1,006)
    [16] com.sun.tools.javac.main.JavaCompiler.parseFiles (JavaCompiler.java:993)
    [17] com.sun.tools.javac.main.JavaCompiler.compile (JavaCompiler.java:919)
    [18] com.sun.tools.javac.main.Main.compile (Main.java:317)
    [19] com.sun.tools.javac.main.Main.compile (Main.java:176)
    [20] com.sun.tools.javac.Main.compile (Main.java:64)
    [21] com.sun.tools.javac.Main.main (Main.java:50)

    相关阅读


    背景

System.arraycopy 是 java.lang.System 类下面的静态方法,主要是把一个数组中的元素复制到另外一个数组


    为什么要介绍一下这个函数呢?
    因为lucene经常会用到这个函数复制内容


    注释

    public static void arraycopy(Object src,
    int srcPos,
    Object dest,
    int destPos,
    int length)

    Copies an array from the specified source array, beginning at the specified position, to the specified position of the destination array. A subsequence of array components are copied from the source array referenced by src to the destination array referenced by dest. The number of components copied is equal to the length argument. The components at positions srcPos through srcPos+length-1 in the source array are copied into positions destPos through destPos+length-1, respectively, of the destination array.

    If the src and dest arguments refer to the same array object, then the copying is performed as if the components at positions srcPos through srcPos+length-1 were first copied to a temporary array with length components and then the contents of the temporary array were copied into positions destPos through destPos+length-1 of the destination array.

    If dest is null, then a NullPointerException is thrown.

    If src is null, then a NullPointerException is thrown and the destination array is not modified.

    Otherwise, if any of the following is true, an ArrayStoreException is thrown and the destination is not modified:

    The src argument refers to an object that is not an array.
    The dest argument refers to an object that is not an array.
    The src argument and dest argument refer to arrays whose component types are different primitive types.
    The src argument refers to an array with a primitive component type and the dest argument refers to an array with a reference component type.
    The src argument refers to an array with a reference component type and the dest argument refers to an array with a primitive component type.

    Otherwise, if any of the following is true, an IndexOutOfBoundsException is thrown and the destination is not modified:

    The srcPos argument is negative.
    The destPos argument is negative.
    The length argument is negative.
    srcPos+length is greater than src.length, the length of the source array.
    destPos+length is greater than dest.length, the length of the destination array.

    Otherwise, if any actual component of the source array from position srcPos through srcPos+length-1 cannot be converted to the component type of the destination array by assignment conversion, an ArrayStoreException is thrown. In this case, let k be the smallest nonnegative integer less than length such that src[srcPos+k] cannot be converted to the component type of the destination array; when the exception is thrown, source array components from positions srcPos through srcPos+k-1 will already have been copied to destination array positions destPos through destPos+k-1 and no other positions of the destination array will have been modified. (Because of the restrictions already itemized, this paragraph effectively applies only to the situation where both arrays have component types that are reference types.)

    Parameters:
    src - the source array.
    srcPos - starting position in the source array.
    dest - the destination array.
    destPos - starting position in the destination data.
    length - the number of array elements to be copied.
    Throws:
    IndexOutOfBoundsException - if copying would cause access of data outside array bounds.
    ArrayStoreException - if an element in the src array could not be stored into the dest array because of a type mismatch.
    NullPointerException - if either src or dest is null.
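下面是一个简单的使用示例(示例代码,仅作演示),包括把元素复制到另一个数组,以及同一数组内重叠复制时"先拷到临时数组"的语义:

import java.util.Arrays;

public class ArrayCopyDemo {
    public static void main(String[] args) {
        int[] src = {1, 2, 3, 4, 5};
        int[] dest = new int[5];
        System.arraycopy(src, 0, dest, 0, 5);   // dest = [1, 2, 3, 4, 5]

        // 同一个数组内的重叠复制:语义上相当于先复制到临时数组,再写回目标位置
        int[] a = {1, 2, 3, 4, 5};
        System.arraycopy(a, 0, a, 1, 4);        // a = [1, 1, 2, 3, 4]

        System.out.println(Arrays.toString(dest));
        System.out.println(Arrays.toString(a));
    }
}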

    背景

很多 blog 在介绍 kmp 算法的时候,都没有把正确性的证明写清楚

论文地址在这里:


    shadow

这个词描述的是同一个类里面不同作用范围(scope)内的同名变量:越靠内层 scope 的变量,会 shadow(遮蔽)外层 scope 的同名变量


    shadow 会和几个词相关:

• scope
• simple name

原则: 在使用simple name的情况下,scope 里面的声明会 shadow scope 外面的同名声明,示例见下面的代码
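一个最小的 shadow 示例(示例代码,类名和变量名都是演示用的假设):

public class ShadowDemo {
    int x = 1;                            // 字段 x

    void demo() {
        int x = 2;                        // 局部变量 x,在这个 scope 内 shadow 了字段 x
        System.out.println(x);            // 输出 2,simple name 解析到局部变量
        System.out.println(this.x);       // 输出 1,被 shadow 的字段需要用限定名访问
    }
}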


    If the class declares a field with a certain name, then the declaration of that field is said to hide any and all accessible declarations of fields with the same name in superclasses, and superinterfaces of the class.


    15.11. Field Access Expressions


    6.2 Names and Identifiers
    A name is used to refer to an entity declared in a program.
    There are two forms of names: simple names and qualified names.
    A simple name is a single identifier.
    A qualified name consists of a name, a “.” token, and an identifier


    The scope of a declaration is the region of the program within which the entity
    declared by the declaration can be referred to using a simple name, provided it is
    not shadowed (§6.4.1)


    6.4.1 Shadowing
    Some declarations may be shadowed in part of their scope by another declaration of
    the same name, in which case a simple name cannot be used to refer to the declared
    entity.


    hidden

hidden 描述的是父类和子类之间同名字段的可见性:子类声明同名字段后,父类字段会被 hide,示例见下面的代码
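一个最小的字段 hiding 示例(示例代码,类名为演示假设):字段访问按引用的静态类型解析,所以被 hide 的父类字段仍然可以通过 super 或父类类型读到:

class Parent {
    int value = 1;
}

class Child extends Parent {
    int value = 2;                                   // hide 了 Parent.value

    void show() {
        System.out.println(value);                   // 2,子类字段
        System.out.println(super.value);             // 1,父类字段仍然存在,只是被 hide
        System.out.println(((Parent) this).value);   // 1,字段访问按静态类型解析
    }
}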


    背景

    在自学编译原理,所以了解了一下后端的内容


    llvm ir

llvm 会把代码编译成 llvm ir 这种中间格式,再交给后端处理。如果只想自己写前端的词法分析和语法树,不想处理后端的代码生成和优化,可以先生成 llvm ir,然后交给 llvm 处理:
    define i32 @sum(i32 %a, i32 %b) {
    entry:
    %result = add i32 %a, %b
    ret i32 %result
    }

    这段代码定义了一个名为sum的函数,它接受两个i32类型的参数%a和%b,并返回它们的和。下面是对代码的逐行解释:

define i32 @sum(i32 %a, i32 %b):这是函数的定义。
define关键字用于定义函数,i32表示返回类型为32位整数,@sum是函数名,(i32 %a, i32 %b)表示函数接受两个32位整数类型的参数%a和%b。

entry::这是函数的入口标签。在这个简单的例子中,我们只有一个基本块。

%result = add i32 %a, %b:这一行使用add指令将参数%a和%b相加,并将结果存储在%result变量中。add指令是LLVM IR中的算术指令之一。

ret i32 %result:这一行使用ret指令将%result的值作为函数的返回值。
请注意,LLVM IR是一种低级中间表示,它不同于高级语言(如C++或Python)。它具有一种类似汇编语言的结构,但具有更高级别的抽象。LLVM IR具有丰富的指令集和类型系统,可以表示各种编程语言的代码。

    编译

    要将LLVM IR转换为汇编代码,您可以使用LLVM工具链中的llc命令。llc是LLVM的静态编译器,它将LLVM IR转换为机器特定的汇编代码。

以下是将LLVM IR生成汇编代码的基本步骤:

编写LLVM IR文件:创建一个文本文件,使用LLVM IR语言编写您的程序代码。将其保存为.ll文件,例如example.ll。

使用llc命令生成汇编代码:打开终端或命令提示符,并导航到您的LLVM安装目录中的bin文件夹。然后运行以下命令:

llc -O3 example.ll -o example.s

这将使用优化级别3(-O3)将example.ll文件转换为汇编代码,并将结果保存为example.s文件。

    图片


    背景

    了解分词过程


    概述

    lucene的查询过程:


    (String query , String field ) -> Query


    整个过程是将字符串"how old" 切割成一个个Term Query


    最后会构造成一棵语法树:

    should:[how,old]
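用 classic QueryParser 可以大致复现这个过程(示例代码,字段名 contents 和 StandardAnalyzer 都是演示假设,细节以实际 lucene 版本为准):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.Query;

public class ParseDemo {
    public static void main(String[] args) throws Exception {
        QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
        Query query = parser.parse("how old");      // 默认操作符是 OR
        System.out.println(query);                  // 类似: contents:how contents:old,即两个 SHOULD 子句
    }
}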

    图片


    背景

    lucene 的分词是一个基本的话题,主要是利用:incrementToken 这个抽象方法以及继承AttributeSource 这个类

    public abstract class TokenStream extends AttributeSource implements Closeable {
    public abstract boolean incrementToken() throws IOException;
    }
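消费 TokenStream 的大致流程如下(示例代码,字段名和文本为演示假设):先 reset,循环调用 incrementToken,通过 AttributeSource 上注册的 CharTermAttribute 读取每个分词,最后 end:

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenizeDemo {
    public static void main(String[] args) throws IOException {
        Analyzer analyzer = new StandardAnalyzer();
        try (TokenStream ts = analyzer.tokenStream("contents", "how old are you")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);  // 注册/获取属性
            ts.reset();                           // 消费前必须 reset
            while (ts.incrementToken()) {         // 每次前进到下一个分词
                System.out.println(term.toString());
            }
            ts.end();
        }
    }
}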

    lucene boolean clause

    相关阅读


lucene 的boolean 子句有四种(构造示例见列表之后):

• MUST
• FILTER
• SHOULD
• MUST_NOT
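构造 boolean 查询的一个简单示例(示例代码,字段名和词项为演示假设),四种子句对应 BooleanClause.Occur 的四个枚举值:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class BooleanClauseDemo {
    public static void main(String[] args) {
        BooleanQuery query = new BooleanQuery.Builder()
                .add(new TermQuery(new Term("contents", "how")), BooleanClause.Occur.SHOULD)
                .add(new TermQuery(new Term("contents", "old")), BooleanClause.Occur.SHOULD)
                .add(new TermQuery(new Term("contents", "spam")), BooleanClause.Occur.MUST_NOT)
                .build();
        System.out.println(query);   // 类似: contents:how contents:old -contents:spam
    }
}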

    堆栈

    <init>:202, TermQuery (org.apache.lucene.search)
    newTermQuery:640, QueryBuilder (org.apache.lucene.util)
    add:408, QueryBuilder (org.apache.lucene.util)
    analyzeMultiBoolean:427, QueryBuilder (org.apache.lucene.util)
    createFieldQuery:364, QueryBuilder (org.apache.lucene.util)
    createFieldQuery:257, QueryBuilder (org.apache.lucene.util)
    newFieldQuery:468, QueryParserBase (org.apache.lucene.queryparser.classic)
    getFieldQuery:457, QueryParserBase (org.apache.lucene.queryparser.classic)
    MultiTerm:680, QueryParser (org.apache.lucene.queryparser.classic)
    Query:233, QueryParser (org.apache.lucene.queryparser.classic)
    TopLevelQuery:223, QueryParser (org.apache.lucene.queryparser.classic)
    parse:136, QueryParserBase (org.apache.lucene.queryparser.classic)
    testParse:20, ParseTest (com.dinosaur.lucene.demo)


    排序算分

BlockMaxMaxscoreScorer 的matches会把所有分词的分数算出来,然后计算分数总和

    score:250, BM25Similarity$BM25Scorer (org.apache.lucene.search.similarities)
    score:60, LeafSimScorer (org.apache.lucene.search)
    score:75, TermScorer (org.apache.lucene.search)
    matches:240, BlockMaxMaxscoreScorer$2 (org.apache.lucene.search)
    doNext:85, TwoPhaseIterator$TwoPhaseIteratorAsDocIdSetIterator (org.apache.lucene.search)
    advance:78, TwoPhaseIterator$TwoPhaseIteratorAsDocIdSetIterator (org.apache.lucene.search)
    score:232, BooleanWeight$2 (org.apache.lucene.search)
    score:38, BulkScorer (org.apache.lucene.search)
    search:776, IndexSearcher (org.apache.lucene.search)
    search:694, IndexSearcher (org.apache.lucene.search)
    search:688, IndexSearcher (org.apache.lucene.search)
    searchAfter:523, IndexSearcher (org.apache.lucene.search)
    search:538, IndexSearcher (org.apache.lucene.search)
    doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)
    testSearch:131, SearchFiles (com.dinosaur.lucene.skiptest)


    相关阅读


    背景

    在使用java的idea的时候,有如下代码

Date date = new Date();
date.getTime() + 30 * 24 * 60 * 60 * 1000;

其中30 * 24 * 60 * 60 * 1000 会变成负数,而且idea会提示:

Numeric overflow in expression

改成下面的样子就可以去掉警告:

date.getTime() + 30L * 24 * 60 * 60 * 1000

    原理

原理是类型提升和溢出发生的先后顺序:30 * 24 * 60 * 60 * 1000 先按 int 计算,结果超过 Integer.MAX_VALUE 而溢出成负数,之后才被提升为 long 与 date.getTime() 相加;把其中一个操作数写成 30L,整个乘法就按 long 计算,不会溢出
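一个最小的演示(示例代码):

public class OverflowDemo {
    public static void main(String[] args) {
        int a = 30 * 24 * 60 * 60 * 1000;     // 按 int 计算,2592000000 超过 Integer.MAX_VALUE,溢出为 -1702967296
        long b = 30L * 24 * 60 * 60 * 1000;   // 第一个操作数是 long,整个乘法按 long 计算,结果是 2592000000
        System.out.println(a);
        System.out.println(b);
    }
}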


    背景

    项目是springboot 框架
    发现下面错误: found duplicate key xxx

    Caused by: while constructing a mapping
    in 'reader', line 1, column 1:
    management.server.port: 59326
    ^
    found duplicate key cms
    in 'reader', line 286, column 1:
    cms:
    ^

    at org.yaml.snakeyaml.constructor.SafeConstructor.processDuplicateKeys(SafeConstructor.java:106)
    at org.yaml.snakeyaml.constructor.SafeConstructor.flattenMapping(SafeConstructor.java:76)
    at org.yaml.snakeyaml.constructor.SafeConstructor.constructMapping2ndStep(SafeConstructor.java:189)
    at org.yaml.snakeyaml.constructor.BaseConstructor.constructMapping(BaseConstructor.java:460)
    at org.yaml.snakeyaml.constructor.SafeConstructor$ConstructYamlMap.construct(SafeConstructor.java:556)
    at org.yaml.snakeyaml.constructor.BaseConstructor.constructObjectNoCheck(BaseConstructor.java:230)
    at org.yaml.snakeyaml.constructor.BaseConstructor.constructObject(BaseConstructor.java:219)
    at org.springframework.boot.env.OriginTrackedYamlLoader$OriginTrackingConstructor.constructObject(OriginTrackedYamlLoader.java:105)
    at org.yaml.snakeyaml.constructor.BaseConstructor.constructDocument(BaseConstructor.java:173)
    at org.yaml.snakeyaml.constructor.BaseConstructor.getData(BaseConstructor.java:138)
    at org.yaml.snakeyaml.Yaml$1.next(Yaml.java:494)
    at org.springframework.beans.factory.config.YamlProcessor.process(YamlProcessor.java:200)
    at org.springframework.beans.factory.config.YamlProcessor.process(YamlProcessor.java:164)
    at org.springframework.boot.env.OriginTrackedYamlLoader.load(OriginTrackedYamlLoader.java:82)
    at org.springframework.boot.env.YamlPropertySourceLoader.load(YamlPropertySourceLoader.java:50)
    at org.springframework.boot.context.config.ConfigFileApplicationListener$Loader.loadDocuments(ConfigFileApplicationListener.java:632)
    at org.springframework.boot.context.config.ConfigFileApplicationListener$Loader.load(ConfigFileApplicationListener.java:534)
    ... 64 more



    解决方案

yml文件里有多个同名的cms key,所以冲突了,去掉重复的那个即可解决


    背景

    本地编译报错

Caused by: java.lang.reflect.InaccessibleObjectException: Unable to make protected final java.lang.Class java.lang.ClassLoader.defineClass(java.lang.String,byte[],int,int,java.security.ProtectionDomain) throws java.lang.ClassFormatError accessible: module java.base does not "opens java.lang" to unnamed module @49dc7102

    排查

本地编译的时候报这个错,而同事没有这个错误。
错误和类加载器相关,第一个怀疑的就是jdk,最后找到的原因确实是jdk版本不对:同事用的是jdk11,
而我刚升级idea不久,idea默认使用的是jdk17

    原因

    项目使用的是jdk11 ,而我idea使用的是jdk17


    修改方式

    进入:

File --> Project Structure

    选择jdk11,然后问题解决
    图片


    背景

    # Clone github repository.
    $ git clone https://github.com/milvus-io/milvus.git

    # Install third-party dependencies.
    $ cd milvus/
    $ ./scripts/install_deps.sh

    # Compile Milvus.
    $ make

    相关错误

• Could NOT find BLAS (missing: BLAS_LIBRARIES)
  解决方案:
  sudo apt-get update
  sudo apt-get install -y libopenblas-dev

• ./milvus: error while loading shared libraries: libtbbmalloc.so.2: cannot open shared object file: No such file or directory
  解决方案:
  sudo apt-get install libtbb2

    背景

java 的Arrays.sort()或者Stream.sorted() 都会使用Comparable<T>/Comparator<T> 来比较元素
目前需要了解这些排序函数究竟是升序还是降序的


    一句话答案

    所有的排序都是升序的ascending


    原因

Arrays.sort() 、Stream.sorted() 都依赖Comparable<T>/Comparator<T>,都需要实现比较接口int compare(T o1, T o2)(或compareTo(T o))


    我们看看接口compare的注释:

    Params:
    o1 – the first object to be compared. o2 – the second object to be compared.
    Returns:
    a negative integer, zero, or a positive integer as the first argument is less than, equal to, or greater than the second.
    int compare(T o1, T o2);

    入参有两个:第一个参数o1,第二个是o2
    返回值:

• 如果o1 < o2 返回负数
• 如果o1 > o2 返回正数
• 如果o1 = o2 返回0

    再看看注释java.util.Comparator<T> , 默认都是natural ordering

    /**
    * Returns a comparator that imposes the reverse of the <em>natural
    * ordering</em>.
    *
    * <p>The returned comparator is serializable and throws {@link
    * NullPointerException} when comparing {@code null}.
    *
    * @param <T> the {@link Comparable} type of element to be compared
    * @return a comparator that imposes the reverse of the <i>natural
    * ordering</i> on {@code Comparable} objects.
    * @see Comparable
    * @since 1.8
    */
    public static <T extends Comparable<? super T>> Comparator<T> reverseOrder() {
    return Collections.reverseOrder();
    }
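一个简单的验证(示例代码):Arrays.sort 和 Stream.sorted 默认按 natural ordering 升序,想要降序需要显式传入 reverseOrder:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SortOrderDemo {
    public static void main(String[] args) {
        Integer[] nums = {3, 1, 2};
        Arrays.sort(nums);                               // 默认 natural ordering,升序
        System.out.println(Arrays.toString(nums));       // [1, 2, 3]

        List<Integer> desc = Stream.of(3, 1, 2)
                .sorted(Comparator.reverseOrder())       // 反转 natural ordering,得到降序
                .collect(Collectors.toList());
        System.out.println(desc);                        // [3, 2, 1]
    }
}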



    相关阅读

    自然排序说明
    stackoverflow


    背景

    了解lucene的搜索过程:

• 分词
• 算每个分词的权重,排序取topk

    代码堆栈

• 写入过程:
      add:473, FSTCompiler (org.apache.lucene.util.fst)
      compileIndex:504, Lucene90BlockTreeTermsWriter$PendingBlock (org.apache.lucene.codecs.lucene90.blocktree)
      writeBlocks:725, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
      finish:1105, Lucene90BlockTreeTermsWriter$TermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
      write:370, Lucene90BlockTreeTermsWriter (org.apache.lucene.codecs.lucene90.blocktree)
      write:172, PerFieldPostingsFormat$FieldsWriter (org.apache.lucene.codecs.perfield)
      flush:135, FreqProxTermsWriter (org.apache.lucene.index)
      flush:310, IndexingChain (org.apache.lucene.index)
      flush:392, DocumentsWriterPerThread (org.apache.lucene.index)
      doFlush:492, DocumentsWriter (org.apache.lucene.index)
      flushAllThreads:671, DocumentsWriter (org.apache.lucene.index)
      doFlush:4194, IndexWriter (org.apache.lucene.index)
      flush:4168, IndexWriter (org.apache.lucene.index)
      shutdown:1322, IndexWriter (org.apache.lucene.index)
      close:1362, IndexWriter (org.apache.lucene.index)
      doTestSearch:133, FstTest (com.dinosaur.lucene.demo)
• 读的过程
      findTargetArc:1418, FST (org.apache.lucene.util.fst)
      seekExact:511, SegmentTermsEnum (org.apache.lucene.codecs.lucene90.blocktree)
      loadTermsEnum:111, TermStates (org.apache.lucene.index)
      build:96, TermStates (org.apache.lucene.index)
      createWeight:227, TermQuery (org.apache.lucene.search)
      createWeight:904, IndexSearcher (org.apache.lucene.search)
      search:687, IndexSearcher (org.apache.lucene.search)
      searchAfter:523, IndexSearcher (org.apache.lucene.search)
      search:538, IndexSearcher (org.apache.lucene.search)
      doPagingSearch:158, SearchFiles (com.dinosaur.lucene.demo)
      testSearch:128, SearchFiles (com.dinosaur.lucene.demo)


    例子

cfs 文件

    $ hexdump  app/index/_3.cfs
    000000 3f d7 6c 17 14 4c 75 63 65 6e 65 39 30 43 6f 6d
    000010 70 6f 75 6e 64 44 61 74 61 00 00 00 00 7a fc 30
    000020 52 e0 51 d2 54 be 49 7f 21 78 69 fe c4 00 00 00
    000030 3f d7 6c 17 11 4c 75 63 65 6e 65 39 30 4e 6f 72
    000040 6d 73 44 61 74 61 00 00 00 00 7a fc 30 52 e0 51
    000050 d2 54 be 49 7f 21 78 69 fe c4 00 04 03 c0 28 93
    000060 e8 00 00 00 00 00 00 00 00 f0 6a f4 62 00 00 00
    000070 3f d7 6c 17 16 4c 75 63 65 6e 65 39 30 46 69 65
    000080 6c 64 73 49 6e 64 65 78 49 64 78 00 00 00 00 7a
    000090 fc 30 52 e0 51 d2 54 be 49 7f 21 78 69 fe c4 00
    0000a0 c0 28 93 e8 00 00 00 00 00 00 00 00 92 7f 21 bb
    0000b0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 69
    0000c0 6e 74 73 46 6f 72 6d 61 74 49 6e 64 65 78 00 00
    0000d0 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78 69
    0000e0 fe c4 00 32 c0 28 93 e8 00 00 00 00 00 00 00 00
    0000f0 f7 61 6e 2f 00 00 00 00 3f d7 6c 17 13 42 6c 6f
    000100 63 6b 54 72 65 65 54 65 72 6d 73 49 6e 64 65 78
    000110 00 00 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21
    000120 78 69 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 00
    000130 00 c0 28 93 e8 00 00 00 00 00 00 00 00 07 1a 7b
    000140 47 00 00 00 00 00 00 00 3f d7 6c 17 18 4c 75 63
    000150 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61
    000160 74 44 61 74 61 00 00 00 00 7a fc 30 52 e0 51 d2
    000170 54 be 49 7f 21 78 69 fe c4 00 02 fe 00 08 80 00
    000180 01 88 d2 0f 28 0d ff c0 28 93 e8 00 00 00 00 00
    000190 00 00 00 6d 43 fa 6e 00 3f d7 6c 17 19 4c 75 63
    0001a0 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69
    0001b0 74 65 72 44 6f 63 00 00 00 00 7a fc 30 52 e0 51
    0001c0 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65 6e
    0001d0 65 39 30 5f 30 01 03 01 03 c0 28 93 e8 00 00 00 <--- 右边的01 03 是you的两个docid
    0001e0 00 00 00 00 00 26 f5 75 88 00 00 00 00 00 00 00
    0001f0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 73
    000200 74 69 6e 67 73 57 72 69 74 65 72 50 6f 73 00 00
    000210 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78 69
    000220 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 02 00 00
    000230 01 02 03 01 c0 28 93 e8 00 00 00 00 00 00 00 00
    000240 c5 ac 32 b6 00 00 00 00 3f d7 6c 17 15 4c 75 63
    000250 65 6e 65 39 30 4e 6f 72 6d 73 4d 65 74 61 64 61
    000260 74 61 00 00 00 00 7a fc 30 52 e0 51 d2 54 be 49
    000270 7f 21 78 69 fe c4 00 02 00 00 00 ff ff ff ff ff
    000280 ff ff ff 00 00 00 00 00 00 00 00 ff ff ff 02 00
    000290 00 00 01 2b 00 00 00 00 00 00 00 ff ff ff ff c0
    0002a0 28 93 e8 00 00 00 00 00 00 00 00 1c 85 f4 99 00
    0002b0 3f d7 6c 17 1c 4c 75 63 65 6e 65 39 30 53 74 6f
    0002c0 72 65 64 46 69 65 6c 64 73 46 61 73 74 44 61 74
    0002d0 61 00 00 00 01 7a fc 30 52 e0 51 d2 54 be 49 7f
    0002e0 21 78 69 fe c4 00 00 0a 00 01 08 12 13 01 04 02
    0002f0 05 05 05 05 05 05 05 05 05 10 00 40 10 2e 2e 5c
    000300 40 64 6f 63 73 40 5c 64 65 6d 40 6f 2e 74 78 40
    000310 74 00 11 2e 40 2e 5c 64 6f 40 63 73 5c 64 40 65
    000320 6d 6f 32 40 2e 74 78 74 c0 28 93 e8 00 00 00 00
    000330 00 00 00 00 81 b0 7e 09 3f d7 6c 17 18 4c 75 63
    000340 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61
    000350 74 4d 65 74 61 00 00 00 00 7a fc 30 52 e0 51 d2
    000360 54 be 49 7f 21 78 69 fe c4 00 01 00 00 00 3f d7
    000370 6c 17 03 42 4b 44 00 00 00 09 01 01 80 04 08 01
    000380 80 00 01 88 d2 0f 28 0d 80 00 01 88 d2 0f 28 0d
    000390 02 02 01 32 00 00 00 00 00 00 00 33 00 00 00 00
    0003a0 00 00 00 ff ff ff ff 44 00 00 00 00 00 00 00 4f
    0003b0 00 00 00 00 00 00 00 c0 28 93 e8 00 00 00 00 00
    0003c0 00 00 00 02 3e 97 d6 00 3f d7 6c 17 17 4c 75 63
    0003d0 65 6e 65 39 30 46 69 65 6c 64 73 49 6e 64 65 78
    0003e0 4d 65 74 61 00 00 00 01 7a fc 30 52 e0 51 d2 54
    0003f0 be 49 7f 21 78 69 fe c4 00 80 80 05 02 00 00 00
    000400 0a 00 00 00 02 00 00 00 30 00 00 00 00 00 00 00
    000410 00 00 00 00 00 00 00 00 00 00 00 40 00 00 00 00
    000420 00 00 00 00 00 30 00 00 00 00 00 00 00 36 00 00
    000430 00 00 00 00 00 00 00 84 42 00 00 00 00 00 00 00
    000440 00 00 30 00 00 00 00 00 00 00 78 00 00 00 00 00
    000450 00 00 01 01 02 c0 28 93 e8 00 00 00 00 00 00 00
    000460 00 c3 23 d0 d6 00 00 00 3f d7 6c 17 12 42 6c 6f <------- 3f
    000470 63 6b 54 72 65 65 54 65 72 6d 73 44 69 63 74 00
    000480 00 00 00 7a fc 30 52 e0 51 d2 54 be 49 7f 21 78
    000490 69 fe c4 0a 4c 75 63 65 6e 65 39 30 5f 30 0b 9c <--------
    0004a0 01 61 72 65 68 6f 77 6f 6c 64 73 74 75 64 65 6e
    0004b0 74 79 6f 75 0a 03 03 03 07 03 05 04 00 05 04 00 <------ 05 04 00 05 04 是position
    0004c0 0b 7a 3d 04 00 02 01 01 05 01 00 01 05 8c 02 2e <------- 7a 3d 04 是很多位置信息
    0004d0 2e 5c 64 6f 63 73 5c 64 65 6d 6f 2e 74 78 74 2e
    0004e0 2e 5c 64 6f 63 73 5c 64 65 6d 6f 32 2e 74 78 74
    0004f0 04 10 11 01 03 04 82 01 00 05 c0 28 93 e8 00 00
    000500 00 00 00 00 00 00 1a 7f dc 45 00 00 00 00 00 00
    000510 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65
    000520 72 6d 73 4d 65 74 61 00 00 00 00 7a fc 30 52 e0
    000530 51 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65
    000540 6e 65 39 30 5f 30 3f d7 6c 17 1b 4c 75 63 65 6e
    000550 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 74 65
    000560 72 54 65 72 6d 73 00 00 00 00 7a fc 30 52 e0 51
    000570 d2 54 be 49 7f 21 78 69 fe c4 0a 4c 75 63 65 6e
    000580 65 39 30 5f 30 80 01 02 02 05 02 da 01 07 07 02
    000590 03 61 72 65 03 79 6f 75 37 3f d7 6c 17 03 46 53
    0005a0 54 00 00 00 08 01 03 01 da 02 00 00 01 00 02 02
    0005b0 92 03 02 02 10 2e 2e 5c 64 6f 63 73 5c 64 65 6d
    0005c0 6f 2e 74 78 74 11 2e 2e 5c 64 6f 63 73 5c 64 65
    0005d0 6d 6f 32 2e 74 78 74 38 3f d7 6c 17 03 46 53 54
    0005e0 00 00 00 08 01 03 03 92 02 00 00 01 49 00 00 00
    0005f0 00 00 00 00 a2 00 00 00 00 00 00 00 c0 28 93 e8
    000600 00 00 00 00 00 00 00 00 c9 44 df a8 00 00 00 00
    000610 3f d7 6c 17 12 4c 75 63 65 6e 65 39 34 46 69 65
    000620 6c 64 49 6e 66 6f 73 00 00 00 00 7a fc 30 52 e0
    000630 51 d2 54 be 49 7f 21 78 69 fe c4 00 03 04 70 61
    000640 74 68 00 02 01 00 ff ff ff ff ff ff ff ff 02 1d
    000650 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e 67 73
    000660 46 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75
    000670 63 65 6e 65 39 30 1d 50 65 72 46 69 65 6c 64 50
    000680 6f 73 74 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75
    000690 66 66 69 78 01 30 00 00 01 00 08 6d 6f 64 69 66
    0006a0 69 65 64 01 00 00 00 ff ff ff ff ff ff ff ff 00
    0006b0 01 01 08 00 01 00 08 63 6f 6e 74 65 6e 74 73 02
    0006c0 00 03 00 ff ff ff ff ff ff ff ff 02 1d 50 65 72
    0006d0 46 69 65 6c 64 50 6f 73 74 69 6e 67 73 46 6f 72
    0006e0 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75 63 65 6e
    0006f0 65 39 30 1d 50 65 72 46 69 65 6c 64 50 6f 73 74
    000700 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75 66 66 69
    000710 78 01 30 00 00 01 00 c0 28 93 e8 00 00 00 00 00
    000720 00 00 00 36 55 24 d2 c0 28 93 e8 00 00 00 00 00
    000730 00 00 00 41 6a 49 d4

    tim文件的偏移是offset=1128
    tim 文件

    score:250, BM25Similarity$BM25Scorer (org.apache.lucene.search.similarities)
    score:60, LeafSimScorer (org.apache.lucene.search)
    score:75, TermScorer (org.apache.lucene.search)
    collect:73, TopScoreDocCollector$SimpleTopScoreDocCollector$1 (org.apache.lucene.search)
    scoreAll:305, Weight$DefaultBulkScorer (org.apache.lucene.search)
    score:247, Weight$DefaultBulkScorer (org.apache.lucene.search)
    score:38, BulkScorer (org.apache.lucene.search)
    search:776, IndexSearcher (org.apache.lucene.search)
    search:694, IndexSearcher (org.apache.lucene.search)
    search:688, IndexSearcher (org.apache.lucene.search)
    searchAfter:523, IndexSearcher (org.apache.lucene.search)
    search:538, IndexSearcher (org.apache.lucene.search)
    doPagingSearch:161, SearchFiles (com.dinosaur.lucene.skiptest)

    readField:248, Lucene90CompressingStoredFieldsReader (org.apache.lucene.codecs.lucene90.compressing)
    document:642, Lucene90CompressingStoredFieldsReader (org.apache.lucene.codecs.lucene90.compressing)
    document:253, SegmentReader (org.apache.lucene.index)
    document:171, BaseCompositeReader (org.apache.lucene.index)
    document:411, IndexReader (org.apache.lucene.index)
    doc:390, IndexSearcher (org.apache.lucene.search)
    doPagingSearch:195, SearchFiles (com.dinosaur.lucene.skiptest)


    tim/tip/doc 关系

    tip 是描述一个term的指针
    tim 包含term的统计信息
    doc 描述的是term对应的docId


    也就是说
    tip -> tim -> doc

• 通过tip判断term是否存在
• 然后通过tip找到tim获取统计信息
• 然后通过doc 获取包含该term的docId的数组

    doc file

• doc file open:
      <init>:74, Lucene90PostingsReader (org.apache.lucene.codecs.lucene90)
      fieldsProducer:424, Lucene90PostingsFormat (org.apache.lucene.codecs.lucene90)
      <init>:330, PerFieldPostingsFormat$FieldsReader (org.apache.lucene.codecs.perfield)
      fieldsProducer:392, PerFieldPostingsFormat (org.apache.lucene.codecs.perfield)
      <init>:118, SegmentCoreReaders (org.apache.lucene.index)
      <init>:92, SegmentReader (org.apache.lucene.index)
      doBody:94, StandardDirectoryReader$1 (org.apache.lucene.index)
      doBody:77, StandardDirectoryReader$1 (org.apache.lucene.index)
      run:816, SegmentInfos$FindSegmentsFile (org.apache.lucene.index)
      open:109, StandardDirectoryReader (org.apache.lucene.index)
      open:67, StandardDirectoryReader (org.apache.lucene.index)
      open:60, DirectoryReader (org.apache.lucene.index)
      doSearchDemo:25, SimpleSearchTest (com.dinosaur.lucene.demo)

    how to find the docId list


    org/apache/lucene/codecs/lucene90/Lucene90PostingsReader.java

      final class BlockDocsEnum extends PostingsEnum {

    ...

    public PostingsEnum reset(IntBlockTermState termState, int flags) throws IOException {
    docFreq = termState.docFreq;
    totalTermFreq = indexHasFreq ? termState.totalTermFreq : docFreq;
    docTermStartFP = termState.docStartFP;
    skipOffset = termState.skipOffset;
    singletonDocID = termState.singletonDocID;
    if (docFreq > 1) {
    if (docIn == null) {
    // lazy init
    docIn = startDocIn.clone();
    }
    docIn.seek(docTermStartFP);
    }

    doc = -1;
    this.needsFreq = PostingsEnum.featureRequested(flags, PostingsEnum.FREQS);
    this.isFreqsRead = true;
    if (indexHasFreq == false || needsFreq == false) {
    for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) {
    freqBuffer[i] = 1;
    }
    }
    accum = 0;
    blockUpto = 0;
    nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block
    docBufferUpto = BLOCK_SIZE;
    skipped = false;
    return this;
    }
    }

    相关阅读


    背景

    了解nacos 注册/发现/协议


    java-nacos-client

整个调用堆栈大致如下

    write:35, NettyWritableBuffer (com.alibaba.nacos.shaded.io.grpc.netty.shaded.io.grpc.netty)
    writeRaw:290, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
    writeKnownLengthUncompressed:229, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
    writeUncompressed:168, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
    writePayload:141, MessageFramer (com.alibaba.nacos.shaded.io.grpc.internal)
    writeMessage:53, AbstractStream (com.alibaba.nacos.shaded.io.grpc.internal)
    writeMessage:37, ForwardingClientStream (com.alibaba.nacos.shaded.io.grpc.internal)
    sendMessageInternal:473, ClientCallImpl (com.alibaba.nacos.shaded.io.grpc.internal)
    sendMessage:457, ClientCallImpl (com.alibaba.nacos.shaded.io.grpc.internal)
    sendMessage:37, ForwardingClientCall (com.alibaba.nacos.shaded.io.grpc)
    sendMessage:37, ForwardingClientCall (com.alibaba.nacos.shaded.io.grpc)
    asyncUnaryRequestCall:284, ClientCalls (com.alibaba.nacos.shaded.io.grpc.stub)
    futureUnaryCall:191, ClientCalls (com.alibaba.nacos.shaded.io.grpc.stub)
    request:212, RequestGrpc$RequestFutureStub (com.alibaba.nacos.api.grpc.auto)
    request:73, GrpcConnection (com.alibaba.nacos.common.remote.client.grpc)
    request:657, RpcClient (com.alibaba.nacos.common.remote.client)
    requestToServer:269, NamingGrpcClientProxy (com.alibaba.nacos.client.naming.remote.gprc)
    queryInstancesOfService:169, NamingGrpcClientProxy (com.alibaba.nacos.client.naming.remote.gprc)
    queryInstancesOfService:111, NamingClientProxyDelegate (com.alibaba.nacos.client.naming.remote)
    run:182, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    run:197, ServiceInfoUpdateService$UpdateTask (com.alibaba.nacos.client.naming.core)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    addTask:104, ServiceInfoUpdateService (com.alibaba.nacos.client.naming.core)
    scheduleUpdateIfAbsent:98, ServiceInfoUpdateService (com.alibaba.nacos.client.naming.core)
    subscribe:144, NamingClientProxyDelegate (com.alibaba.nacos.client.naming.remote)
    subscribe:393, NacosNamingService (com.alibaba.nacos.client.naming)
    start:134, NacosWatch (com.alibaba.cloud.nacos.discovery)
    doStart:182, DefaultLifecycleProcessor (org.springframework.context.support)
    access$200:53, DefaultLifecycleProcessor (org.springframework.context.support)
    start:360, DefaultLifecycleProcessor$LifecycleGroup (org.springframework.context.support)
    startBeans:158, DefaultLifecycleProcessor (org.springframework.context.support)
    onRefresh:122, DefaultLifecycleProcessor (org.springframework.context.support)
    finishRefresh:895, AbstractApplicationContext (org.springframework.context.support)
    refresh:554, AbstractApplicationContext (org.springframework.context.support)
    refresh:143, ServletWebServerApplicationContext (org.springframework.boot.web.servlet.context)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    main:25, Application (com.patpat.mms)

    背景

    生产环境会有下面错误: Unknown error 1002, server ClickHouseNode , clickhouse使用的是http的协议

    Caused by: java.sql.SQLException: Unknown error 1002, server ClickHouseNode(addr=http:xxxx.amazonaws.com:8123, db=personas)@1267316279
    at com.clickhouse.jdbc.SqlExceptionUtils.handle(SqlExceptionUtils.java:54)
    at com.clickhouse.jdbc.SqlExceptionUtils.handle(SqlExceptionUtils.java:69)
    at com.clickhouse.jdbc.internal.ClickHouseStatementImpl.executeStatement(ClickHouseStatementImpl.java:139)
    at com.clickhouse.jdbc.internal.SqlBasedPreparedStatement.executeBatch(SqlBasedPreparedStatement.java:158)
    at com.clickhouse.jdbc.internal.SqlBasedPreparedStatement.execute(SqlBasedPreparedStatement.java:382)
    at com.zaxxer.hikari.pool.ProxyPreparedStatement.execute(ProxyPreparedStatement.java:44)
    at com.zaxxer.hikari.pool.HikariProxyPreparedStatement.execute(HikariProxyPreparedStatement.java)
    at org.apache.ibatis.executor.statement.PreparedStatementHandler.update(PreparedStatementHandler.java:47)
    at org.apache.ibatis.executor.statement.RoutingStatementHandler.update(RoutingStatementHandler.java:74)
    at org.apache.ibatis.executor.SimpleExecutor.doUpdate(SimpleExecutor.java:50)
    at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117)
    at org.apache.ibatis.executor.CachingExecutor.update(CachingExecutor.java:76)
    at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:566)
    at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:64)
    at com.sun.proxy.$Proxy263.update(Unknown Source)
    at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:194)
    at org.apache.ibatis.session.defaults.DefaultSqlSession.insert(DefaultSqlSession.java:181)
    at jdk.internal.reflect.GeneratedMethodAccessor201.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:566)
    at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:427)
    ... 18 common frames omitted

    翻看源码

    一共有两处会抛出ERROR_UNKNOWN , 代码如下:

    // 路径 : com\clickhouse\clickhouse-client\0.3.2\clickhouse-client-0.3.2-sources.jar!\com\clickhouse\client\ClickHouseException.java
    public static final int ERROR_UNKNOWN = 1002;
    private static int extractErrorCode(String errorMessage) {
    if (errorMessage == null || errorMessage.isEmpty()) {
return ERROR_UNKNOWN; // 返回 unknown 错误码
    } else if (errorMessage.startsWith("Poco::Exception. Code: 1000, ")) {
    return ERROR_POCO;
    }

    int startIndex = errorMessage.indexOf(' ');
    if (startIndex >= 0) {
    for (int i = ++startIndex, len = errorMessage.length(); i < len; i++) {
    char ch = errorMessage.charAt(i);
    if (ch == '.' || ch == ',' || Character.isWhitespace(ch)) {
    try {
    return Integer.parseInt(errorMessage.substring(startIndex, i));
    } catch (NumberFormatException e) {
    // ignore
    }
    break;
    }
    }
    }

    // this is confusing as usually it's a client-side exception
return ERROR_UNKNOWN; // 返回 unknown 错误码
    }



看了代码,这个错误是解析不出错误码时返回的外部异常,不是sql语法错误,所以一般是反向代理一侧有问题,也就是代理层或者clickhouse层返回了异常内容


    背景

写cmake的时候,经常好奇为什么find_package(bison)不加额外配置就可以默认找到


    原因

cmake 自带了一批默认的FindXXX.cmake模块文件

    https://cmake.org/cmake/help/v2.8.9/cmake.html#module:FindBISON

    /usr/share/cmake-3.22/Modules/FindBISON.cmake

    背景

    有个update_rules字段是json类型

{"type": "once", "values": []}

想用JSON_CONTAINS判断type字段是不是once,发现下面这个sql一直报错:

SELECT  *  FROM  `usergroup`  WHERE JSON_CONTAINS(update_rules , 'once' , '$.type' )

报错一直是Invalid JSON text in argument 2,看了很久也没有看出第二个参数哪里错了

mysql官网例子我也看过,官网也是这样写的

    解决方式

    搜索了一下stackoverflow,原来字符串还要在里面加双引号

也就是'once' 改成'"once"'

完整的sql变成如下:
SELECT  *  FROM  `usergroup`  WHERE JSON_CONTAINS(update_rules , '"once"' , '$.type' )


    背景

    生产环境rabbitmq遇到错误:Payload value must not be empty

    Caused by: org.springframework.messaging.handler.annotation.support.MethodArgumentNotValidException: Could not resolve method parameter at index 0 in public void com.xxxx.consume(java.lang.String,org.springframework.amqp.core.Message,com.rabbitmq.client.Channel) throws java.lang.Exception: 1 error(s): [Error in object 'content': codes []; arguments []; default message [Payload value must not be empty]] 
    at org.springframework.messaging.handler.annotation.support.PayloadMethodArgumentResolver.resolveArgument(PayloadMethodArgumentResolver.java:122)
    at org.springframework.messaging.handler.invocation.HandlerMethodArgumentResolverComposite.resolveArgument(HandlerMethodArgumentResolverComposite.java:117)
    at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.getMethodArgumentValues(InvocableHandlerMethod.java:147)
    at org.springframework.messaging.handler.invocation.InvocableHandlerMethod.invoke(InvocableHandlerMethod.java:115)
    at org.springframework.amqp.rabbit.listener.adapter.HandlerAdapter.invoke(HandlerAdapter.java:75)
    at org.springframework.amqp.rabbit.listener.adapter.MessagingMessageListenerAdapter.invokeHandler(MessagingMessageListenerAdapter.java:261)

    解决方式

    添加注解@Payload

    @RabbitListener(queues = "queue")
    public void consume( @Payload(required = false) String content, Message message, Channel channel) throws Exception { // 将@Payload 塞到 content 字段
    ...
    }

    查看源码

    扒了一下代码查看原因:
    路径

• org\springframework\messaging\handler\annotation\support\PayloadMethodArgumentResolver.java
      public Object resolveArgument(MethodParameter parameter, Message<?> message) throws Exception {
      Payload ann = parameter.getParameterAnnotation(Payload.class);
      if (ann != null && StringUtils.hasText(ann.expression())) {
      throw new IllegalStateException("@Payload SpEL expressions not supported by this resolver");
      }

      Object payload = message.getPayload();
      if (isEmptyPayload(payload)) { // 条件1
      if (ann == null || ann.required()) { // 条件2
      String paramName = getParameterName(parameter);
      BindingResult bindingResult = new BeanPropertyBindingResult(payload, paramName);
      bindingResult.addError(new ObjectError(paramName, "Payload value must not be empty")); // 在这里会校验空
      throw new MethodArgumentNotValidException(message, parameter, bindingResult);
      }
      else {
      return null;
      }
      }
      }
• 条件1 : payload 是空
• 条件2 : 没有@Payload 注解或者 @Payload(required = true)

    原因: 满足条件1和条件2就会抛出异常


    默认不加注解的时候会满足条件1和2 , 所以解决方案:
    添加注解@Payload

    @RabbitListener(queues = "queue")
    public void consume( @Payload(required = false) String content, Message message, Channel channel) throws Exception { // 将@Payload 塞到 content 字段
    ...
    }

    堆栈

    resolveArgument:111, PayloadMethodArgumentResolver (org.springframework.messaging.handler.annotation.support)
    resolveArgument:117, HandlerMethodArgumentResolverComposite (org.springframework.messaging.handler.invocation)
    getMethodArgumentValues:147, InvocableHandlerMethod (org.springframework.messaging.handler.invocation)
    invoke:115, InvocableHandlerMethod (org.springframework.messaging.handler.invocation)
    invoke:75, HandlerAdapter (org.springframework.amqp.rabbit.listener.adapter)
    invokeHandler:261, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
    invokeHandlerAndProcessResult:207, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
    onMessage:146, MessagingMessageListenerAdapter (org.springframework.amqp.rabbit.listener.adapter)
    doInvokeListener:1665, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    actualInvokeListener:1584, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    invokeListener:-1, 1393414871 (org.springframework.amqp.rabbit.listener.AbstractMessageListenerContainer$$Lambda$1549)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    invokeJoinpointUsingReflection:344, AopUtils (org.springframework.aop.support)
    invokeJoinpoint:198, ReflectiveMethodInvocation (org.springframework.aop.framework)
    proceed:163, ReflectiveMethodInvocation (org.springframework.aop.framework)
    doWithRetry:93, RetryOperationsInterceptor$1 (org.springframework.retry.interceptor)
    doExecute:329, RetryTemplate (org.springframework.retry.support)
    execute:225, RetryTemplate (org.springframework.retry.support)
    invoke:116, RetryOperationsInterceptor (org.springframework.retry.interceptor)
    proceed:186, ReflectiveMethodInvocation (org.springframework.aop.framework)
    invoke:215, JdkDynamicAopProxy (org.springframework.aop.framework)
    invokeListener:-1, $Proxy247 (org.springframework.amqp.rabbit.listener)
    invokeListener:1572, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    doExecuteListener:1563, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    executeListener:1507, AbstractMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    doReceiveAndExecute:967, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    receiveAndExecute:914, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    access$1600:83, SimpleMessageListenerContainer (org.springframework.amqp.rabbit.listener)
    mainLoop:1291, SimpleMessageListenerContainer$AsyncMessageProcessingConsumer (org.springframework.amqp.rabbit.listener)
    run:1197, SimpleMessageListenerContainer$AsyncMessageProcessingConsumer (org.springframework.amqp.rabbit.listener)
    run:834, Thread (java.lang)

    在WFST(Weighted Finite State Transducer,加权有限状态转换器)中,”All Pairs Shortest Path”(APSP)算法用于计算任意两个状态之间的最短路径。在WFST中,每个状态之间都有一条带有权重的边,表示从一个状态到另一个状态的转换。APSP算法的目标是找到连接任意两个状态的最短路径,即具有最小总权重的路径。
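下面用 Floyd-Warshall 给一个 APSP 的最小示意(示例代码,这里假设权重按 tropical 半环处理:路径上权重相加、多条路径取最小值,和具体 WFST 库的实现无关):

import java.util.Arrays;

public class ApspDemo {
    static final double INF = Double.POSITIVE_INFINITY;

    // dist[i][j] 初始为状态 i 到 j 的边权,没有边则为 INF;返回任意两个状态之间的最短路径权重
    static double[][] allPairsShortestPath(double[][] dist) {
        int n = dist.length;
        double[][] d = new double[n][];
        for (int i = 0; i < n; i++) {
            d[i] = Arrays.copyOf(dist[i], n);
        }
        for (int k = 0; k < n; k++) {              // 依次允许把状态 k 作为中间节点
            for (int i = 0; i < n; i++) {
                for (int j = 0; j < n; j++) {
                    if (d[i][k] + d[k][j] < d[i][j]) {
                        d[i][j] = d[i][k] + d[k][j];
                    }
                }
            }
        }
        return d;
    }

    public static void main(String[] args) {
        double[][] g = {
                {0, 1, INF},
                {INF, 0, 2},
                {4, INF, 0}
        };
        System.out.println(Arrays.deepToString(allPairsShortestPath(g)));
    }
}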


    相关阅读


    背景

    了解java源码包里面线程池的细节


    线程池

线程池是管理一堆线程的对象。不同线程池之间的区别,主要在于线程创建、调度、销毁等各种策略的不同


    基础类

    Executors

    executor/executors/executorService:


Executor 是一个接口,类似Runnable,实际上功能也差不多

    public interface Executor {
    void execute(Runnable var1);
    }
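一个使用线程池的简单示例(示例代码):ExecutorService 扩展了 Executor,额外提供 submit、shutdown 等生命周期管理方法:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ThreadPoolDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);   // 固定两个线程的线程池

        pool.execute(() -> System.out.println("execute: " + Thread.currentThread().getName()));

        Future<Integer> future = pool.submit(() -> 1 + 1);        // submit 可以拿到返回值
        System.out.println("submit result: " + future.get());

        pool.shutdown();                                          // 不再接收新任务,等待已提交任务执行完
    }
}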

    背景

    了解lucene 的fst结构


    核心函数

freezeTail -> compileNode

    private void freezeTail(int prefixLenPlus1) throws IOException {  // 入参是一个偏移值:  公共前缀+ 1
    final int downTo = Math.max(1, prefixLenPlus1);
    for (int idx = lastInput.length(); idx >= downTo; idx--) {

    boolean doPrune = false;
    boolean doCompile = false;


    if (doCompile) {
    ...
    parent.replaceLast(
    lastInput.intAt(idx - 1),
    compileNode(node, 1 + lastInput.length() - idx),
    nextFinalOutput,
    isFinal);
    ...
    }
    }
    }
    }
    private CompiledNode compileNode(UnCompiledNode<T> nodeIn, int tailLength) throws IOException {
    final long node;
    long bytesPosStart = bytes.getPosition();
    if (dedupHash != null
    && (doShareNonSingletonNodes || nodeIn.numArcs <= 1)
    && tailLength <= shareMaxTailLength) {
    if (nodeIn.numArcs == 0) {
    node = fst.addNode(this, nodeIn);
    lastFrozenNode = node;
    } else {
    node = dedupHash.add(this, nodeIn);
    }
    } else {
    node = fst.addNode(this, nodeIn);
    }
    assert node != -2;

    long bytesPosEnd = bytes.getPosition();
    if (bytesPosEnd != bytesPosStart) {
    // The FST added a new node:
    assert bytesPosEnd > bytesPosStart;
    lastFrozenNode = node;
    }

    nodeIn.clear();

    final CompiledNode fn = new CompiledNode();
    fn.node = node;
    return fn;
    }
    // serializes new node by appending its bytes to the end
    // of the current byte[]
    long addNode(FSTCompiler<T> fstCompiler, FSTCompiler.UnCompiledNode<T> nodeIn)
    throws IOException {
    T NO_OUTPUT = outputs.getNoOutput();

    // System.out.println("FST.addNode pos=" + bytes.getPosition() + " numArcs=" + nodeIn.numArcs);
    if (nodeIn.numArcs == 0) {
    if (nodeIn.isFinal) {
    return FINAL_END_NODE;
    } else {
    return NON_FINAL_END_NODE;
    }
    }
    final long startAddress = fstCompiler.bytes.getPosition();
    // System.out.println(" startAddr=" + startAddress);

    final boolean doFixedLengthArcs = shouldExpandNodeWithFixedLengthArcs(fstCompiler, nodeIn);
    if (doFixedLengthArcs) {
    // System.out.println(" fixed length arcs");
    if (fstCompiler.numBytesPerArc.length < nodeIn.numArcs) {
    fstCompiler.numBytesPerArc = new int[ArrayUtil.oversize(nodeIn.numArcs, Integer.BYTES)];
    fstCompiler.numLabelBytesPerArc = new int[fstCompiler.numBytesPerArc.length];
    }
    }

    fstCompiler.arcCount += nodeIn.numArcs;

    final int lastArc = nodeIn.numArcs - 1;

    long lastArcStart = fstCompiler.bytes.getPosition();
    int maxBytesPerArc = 0;
    int maxBytesPerArcWithoutLabel = 0;
    for (int arcIdx = 0; arcIdx < nodeIn.numArcs; arcIdx++) {
    final FSTCompiler.Arc<T> arc = nodeIn.arcs[arcIdx];
    final FSTCompiler.CompiledNode target = (FSTCompiler.CompiledNode) arc.target;
    int flags = 0;
    // System.out.println(" arc " + arcIdx + " label=" + arc.label + " -> target=" +
    // target.node);

    if (arcIdx == lastArc) {
    flags += BIT_LAST_ARC;
    }

    if (fstCompiler.lastFrozenNode == target.node && !doFixedLengthArcs) {
    // TODO: for better perf (but more RAM used) we
    // could avoid this except when arc is "near" the
    // last arc:
    flags += BIT_TARGET_NEXT;
    }

    if (arc.isFinal) {
    flags += BIT_FINAL_ARC;
    if (arc.nextFinalOutput != NO_OUTPUT) {
    flags += BIT_ARC_HAS_FINAL_OUTPUT;
    }
    } else {
    assert arc.nextFinalOutput == NO_OUTPUT;
    }

    boolean targetHasArcs = target.node > 0;

    if (!targetHasArcs) {
    flags += BIT_STOP_NODE;
    }

    if (arc.output != NO_OUTPUT) {
    flags += BIT_ARC_HAS_OUTPUT;
    }

    fstCompiler.bytes.writeByte((byte) flags);
    long labelStart = fstCompiler.bytes.getPosition();
    writeLabel(fstCompiler.bytes, arc.label);
    int numLabelBytes = (int) (fstCompiler.bytes.getPosition() - labelStart);

    // System.out.println(" write arc: label=" + (char) arc.label + " flags=" + flags + "
    // target=" + target.node + " pos=" + bytes.getPosition() + " output=" +
    // outputs.outputToString(arc.output));

    if (arc.output != NO_OUTPUT) {
    outputs.write(arc.output, fstCompiler.bytes);
    // System.out.println(" write output");
    }

    if (arc.nextFinalOutput != NO_OUTPUT) {
    // System.out.println(" write final output");
    outputs.writeFinalOutput(arc.nextFinalOutput, fstCompiler.bytes);
    }

    if (targetHasArcs && (flags & BIT_TARGET_NEXT) == 0) {
    assert target.node > 0;
    // System.out.println(" write target");
    fstCompiler.bytes.writeVLong(target.node);
    }

    // just write the arcs "like normal" on first pass, but record how many bytes each one took
    // and max byte size:
    if (doFixedLengthArcs) {
    int numArcBytes = (int) (fstCompiler.bytes.getPosition() - lastArcStart);
    fstCompiler.numBytesPerArc[arcIdx] = numArcBytes;
    fstCompiler.numLabelBytesPerArc[arcIdx] = numLabelBytes;
    lastArcStart = fstCompiler.bytes.getPosition();
    maxBytesPerArc = Math.max(maxBytesPerArc, numArcBytes);
    maxBytesPerArcWithoutLabel =
    Math.max(maxBytesPerArcWithoutLabel, numArcBytes - numLabelBytes);
    // System.out.println(" arcBytes=" + numArcBytes + " labelBytes=" + numLabelBytes);
    }
    }

    // TODO: try to avoid wasteful cases: disable doFixedLengthArcs in that case
    /*
    *
    * LUCENE-4682: what is a fair heuristic here?
    * It could involve some of these:
    * 1. how "busy" the node is: nodeIn.inputCount relative to frontier[0].inputCount?
    * 2. how much binSearch saves over scan: nodeIn.numArcs
    * 3. waste: numBytes vs numBytesExpanded
    *
    * the one below just looks at #3
    if (doFixedLengthArcs) {
    // rough heuristic: make this 1.25 "waste factor" a parameter to the phd ctor????
    int numBytes = lastArcStart - startAddress;
    int numBytesExpanded = maxBytesPerArc * nodeIn.numArcs;
    if (numBytesExpanded > numBytes*1.25) {
    doFixedLengthArcs = false;
    }
    }
    */

    if (doFixedLengthArcs) {
    assert maxBytesPerArc > 0;
    // 2nd pass just "expands" all arcs to take up a fixed byte size

    int labelRange = nodeIn.arcs[nodeIn.numArcs - 1].label - nodeIn.arcs[0].label + 1;
    assert labelRange > 0;
    if (shouldExpandNodeWithDirectAddressing(
    fstCompiler, nodeIn, maxBytesPerArc, maxBytesPerArcWithoutLabel, labelRange)) {
    writeNodeForDirectAddressing(
    fstCompiler, nodeIn, startAddress, maxBytesPerArcWithoutLabel, labelRange);
    fstCompiler.directAddressingNodeCount++;
    } else {
    writeNodeForBinarySearch(fstCompiler, nodeIn, startAddress, maxBytesPerArc);
    fstCompiler.binarySearchNodeCount++;
    }
    }

    final long thisNodeAddress = fstCompiler.bytes.getPosition() - 1;
    fstCompiler.bytes.reverse(startAddress, thisNodeAddress);
    fstCompiler.nodeCount++;
    return thisNodeAddress;
    }
Arc<T> describes a single arc.

    //package org.apache.lucene.util.fst;
    // org\apache\lucene\util\fst\FSTCompiler.java
    /** Expert: holds a pending (seen but not yet serialized) arc. */
    static class Arc<T> {
int label; // really an "unsigned" byte // the arc's label
Node target; // e.g. for a -> b, target is b
    boolean isFinal;
    T output;
    T nextFinalOutput;
    }
Example

cat has weight 5
dog has weight 7
dogs has weight 13
    String inputValues[] = {"cat", "dog", "dogs"};
    long[] outputValues = {5, 7, 13};
Below is an example using "cat".
"cat" consists of three characters c, a, t, which correspond to three ASCII codes:

• c: 99
• a: 97
• t: 116

Definition of an Arc: an arc is written as [a:label1] -> [b:label2], meaning a points to b, where a's value is label1 and b's value is label2.

Below is a screenshot from IDEA:
[2747:c] -> [2856:a] -> [2860:t]

(figure: FST linked list)

Below is an example of fst.bytes:

(figure: fst bytes)

Finally it is serialized into the following byte array:
    [0, 116, 15, 97, 6, 6, 115, 31, 103, 7, 111, 6, 7, 100, 22, 4, 5, 99, 16]
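To tie the example together, here is a minimal sketch (my own, not from the post's sources) of building and querying this three-key FST through Lucene's public API. Constructor details differ between Lucene versions (older releases use Builder instead of FSTCompiler), so treat that call as an assumption:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IntsRefBuilder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.FSTCompiler;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class FstDemo {
    public static void main(String[] args) throws Exception {
        String[] inputValues = {"cat", "dog", "dogs"};   // keys must be added in sorted order
        long[] outputValues = {5, 7, 13};

        PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
        // Assumption: in some Lucene versions this is a public constructor, in others FSTCompiler.Builder.
        FSTCompiler<Long> compiler = new FSTCompiler<>(FST.INPUT_TYPE.BYTE1, outputs);

        IntsRefBuilder scratch = new IntsRefBuilder();
        for (int i = 0; i < inputValues.length; i++) {
            // each key/weight pair becomes a path of arcs like the [c]->[a]->[t] chain above
            compiler.add(Util.toIntsRef(new BytesRef(inputValues[i]), scratch), outputValues[i]);
        }
        FST<Long> fst = compiler.compile();

        System.out.println(Util.get(fst, new BytesRef("dogs")));  // prints 13
    }
}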

Related reading

Background
    String inputValues[] = {"cat", "dog", "dogs"};
When I declare an array with the square brackets on the right, IDEA shows a small warning:
    C-style array declaration of local variable 'inputValues' 
The IDE suggests I change it to:
    String[] inputValues = {"cat", "dog", "dogs"};
So what is the difference between them?

I checked Stack Overflow: the two forms are equivalent.

How does the JLS define it?
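For illustration, a small self-contained sketch (my own example, not taken from the JLS text) showing that both forms declare exactly the same type, and the one pitfall the C-style form invites:

public class ArrayDeclDemo {
    public static void main(String[] args) {
        String inputValues[] = {"cat", "dog", "dogs"};   // C-style: brackets on the variable
        String[] sameThing   = {"cat", "dog", "dogs"};   // preferred style: brackets on the type

        // Both variables have exactly the same runtime type:
        System.out.println(inputValues.getClass() == sameThing.getClass());  // true

        // The C-style form gets confusing when several variables share one declaration:
        String[] a, b[];   // a is String[], but b is String[][]
        a = inputValues;
        b = new String[][] { inputValues };
        System.out.println(a.length + " " + b.length);   // 3 1
    }
}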

References


Background

Get familiar with how to use Flink.

Build

Download the code:

    git clone https://github.com/apache/flink.git
    cd flink
## build
    ./mvnw -T1C package -DskipTests
Start
## change directory
cd build-target
## start the cluster
./bin/start-cluster.sh
## run the example
./bin/flink run ./examples/batch/WordCount.jar
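For context, the bundled batch WordCount does roughly the following with the legacy DataSet API. This is a hand-written minimal sketch (the class name MiniWordCount and the sample sentence are invented), not the actual example source:

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

public class MiniWordCount {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        DataSet<String> text = env.fromElements("to be or not to be");

        DataSet<Tuple2<String, Integer>> counts = text
            .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                @Override
                public void flatMap(String line, Collector<Tuple2<String, Integer>> out) {
                    for (String word : line.toLowerCase().split("\\W+")) {
                        if (!word.isEmpty()) {
                            out.collect(new Tuple2<>(word, 1));  // emit (word, 1) per occurrence
                        }
                    }
                }
            })
            .groupBy(0)   // group by the word field
            .sum(1);      // sum the counts

        counts.print();   // triggers execution and prints (word, count) pairs
    }
}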
Output
    ./bin/flink run ./examples/batch/WordCount.jar

    Executing WordCount example with default input data set.
    Use --input to specify file input.
    Printing result to stdout. Use --output to specify output path.
    Job has been submitted with JobID 9451e7d1ac07642311caa0633429027f
    Program execution finished
    Job with JobID 9451e7d1ac07642311caa0633429027f has finished.
    Job Runtime: 218 ms
    Accumulator Results:
    - a6ac7b0991b6510ab27930ef3590fe1f (java.util.ArrayList) [170 elements]


    (a,5)
    (action,1)
    (after,1)
    (against,1)
    (all,2)
    (and,12)
    (arms,1)
    (arrows,1)
    (awry,1)
    (ay,1)
    (bare,1)
    (be,4)
    (bear,3)
    (bodkin,1)
    (bourn,1)
    (but,1)
    (by,2)
    (calamity,1)
    (cast,1)
    (coil,1)
    (come,1)
    (conscience,1)
    (consummation,1)
    (contumely,1)
    (country,1)
    (cowards,1)
    (currents,1)
    (d,4)
    (death,2)
    (delay,1)
    (despis,1)
    (devoutly,1)
    (die,2)
    (does,1)
    (dread,1)
    (dream,1)
    (dreams,1)
    (end,2)
    (enterprises,1)
    (er,1)
    (fair,1)
    (fardels,1)
    (flesh,1)
    (fly,1)
    (for,2)
    (fortune,1)
    (from,1)
    (give,1)
    (great,1)
    (grunt,1)
    (have,2)
    (he,1)
    (heartache,1)
    (heir,1)
    (himself,1)
    (his,1)
    (hue,1)
    (ills,1)
    (in,3)
    (insolence,1)
    (is,3)
    (know,1)
    (law,1)
    (life,2)
    (long,1)
    (lose,1)
    (love,1)
    (make,2)
    (makes,2)
    (man,1)
    (may,1)
    (merit,1)
    (might,1)
    (mind,1)
    (moment,1)
    (more,1)
    (mortal,1)
    (must,1)
    (my,1)
    (name,1)
    (native,1)
    (natural,1)
    (no,2)
    (nobler,1)
    (not,2)
    (now,1)
    (nymph,1)
    (o,1)
    (of,15)
    (off,1)
    (office,1)
    (ophelia,1)
    (opposing,1)
    (oppressor,1)
    (or,2)
    (orisons,1)
    (others,1)
    (outrageous,1)
    (pale,1)
    (pangs,1)
    (patient,1)
    (pause,1)
    (perchance,1)
    (pith,1)
    (proud,1)
    (puzzles,1)
    (question,1)
    (quietus,1)
    (rather,1)
    (regard,1)
    (remember,1)
    (resolution,1)
    (respect,1)
    (returns,1)
    (rub,1)
    (s,5)
    (say,1)
    (scorns,1)
    (sea,1)
    (shocks,1)
    (shuffled,1)
    (sicklied,1)
    (sins,1)
    (sleep,5)
    (slings,1)
    (so,1)
    (soft,1)
    (something,1)
    (spurns,1)
    (suffer,1)
    (sweat,1)
    (take,1)
    (takes,1)
    (than,1)
    (that,7)
    (the,22)
    (their,1)
    (them,1)
    (there,2)
    (these,1)
    (this,2)
    (those,1)
    (thought,1)
    (thousand,1)
    (thus,2)
    (thy,1)
    (time,1)
    (tis,2)
    (to,15)
    (traveller,1)
    (troubles,1)
    (turn,1)
    (under,1)
    (undiscover,1)
    (unworthy,1)
    (us,3)
    (we,4)
    (weary,1)
    (what,1)
    (when,2)
    (whether,1)
    (whips,1)
    (who,2)
    (whose,1)
    (will,1)
    (wish,1)
    (with,3)
    (would,2)
    (wrong,1)
    (you,1)
Related reading

Background

An insert ignore SQL statement deadlocked.

Investigation

The deadlock log

We examined the deadlock log and narrowed it down to one table which, simply put, has two relevant columns:

• id: primary key
• email_address: unique index

Two threads were writing concurrently, each issuing batched insert ignore statements. At first glance the SQL looked trivial; after some googling it turned out to be related to gap locks:
related blog

    2023-05-03 05:06:45 0x4002719ffef0
    *** (1) TRANSACTION:
    TRANSACTION 218982374, ACTIVE 0 sec inserting
    mysql tables in use 1, locked 1
    LOCK WAIT 7 lock struct(s), heap size 1136, 4 row lock(s)
    MySQL thread id 481964, OS thread handle 70370189385456, query id 1320790141 10.4.3.228 app_cdp_0 update
    insert ignore into cdp_user_email
    (email_address,
    clean_tag,
    is_upload_emarsys,
    created_at,
    upload_at
    )
    value


    *** (1) HOLDS THE LOCK(S):
    RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982374 lock_mode X
    Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
    0: len 8; hex 73757072656d756d; asc supremum;;


    *** (1) WAITING FOR THIS LOCK TO BE GRANTED:
    RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982374 lock_mode X insert intention waiting
    Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
    0: len 8; hex 73757072656d756d; asc supremum;;


    *** (2) TRANSACTION:
    TRANSACTION 218982373, ACTIVE 0 sec inserting
    mysql tables in use 1, locked 1
    LOCK WAIT 11 lock struct(s), heap size 1136, 6 row lock(s)
    MySQL thread id 481963, OS thread handle 70370189655792, query id 1320790139 10.4.3.228 app_cdp_0 update
    insert ignore into cdp_user_email
    (email_address,
    clean_tag,
    is_upload_emarsys,
    created_at,
    upload_at
    )
    value


    *** (2) HOLDS THE LOCK(S):
    RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982373 lock_mode X
    Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
    0: len 8; hex 73757072656d756d; asc supremum;;


    *** (2) WAITING FOR THIS LOCK TO BE GRANTED:
    RECORD LOCKS space id 189 page no 328322 n bits 272 index PRIMARY of table `customer_data_platform`.`cdp_user_email` trx id 218982373 lock_mode X insert intention waiting
    Record lock, heap no 1 PHYSICAL RECORD: n_fields 1; compact format; info bits 0
    0: len 8; hex 73757072656d756d; asc supremum;;

    *** WE ROLL BACK TRANSACTION (1)
The fix

Change the batched multi-row insert into single-row inserts.
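A rough sketch of the fix in plain JDBC (the EmailRow holder and the surrounding method are invented; the table and column names are taken from the deadlock log above):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.List;

class SingleRowInserter {
    // EmailRow is an invented holder; its fields mirror the table columns above.
    record EmailRow(String emailAddress, int cleanTag, int isUploadEmarsys,
                    Timestamp createdAt, Timestamp uploadAt) {}

    static void insertOneByOne(Connection conn, List<EmailRow> rows) throws SQLException {
        String sql = "insert ignore into cdp_user_email "
                + "(email_address, clean_tag, is_upload_emarsys, created_at, upload_at) "
                + "values (?, ?, ?, ?, ?)";
        conn.setAutoCommit(false);
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (EmailRow row : rows) {
                ps.setString(1, row.emailAddress());
                ps.setInt(2, row.cleanTag());
                ps.setInt(3, row.isUploadEmarsys());
                ps.setTimestamp(4, row.createdAt());
                ps.setTimestamp(5, row.uploadAt());
                ps.executeUpdate();   // one row per statement
                conn.commit();        // keep each transaction (and its gap locks) short
            }
        }
    }
}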


Background

Understand how Java volatile is implemented.

Implementation

The core is here: is_volatile_shift

    void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
    transition(vtos, vtos);

    const Register cache = rcx;
    const Register index = rdx;
    const Register obj = rcx;
    const Register off = rbx;
    const Register flags = rax;

    resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
    jvmti_post_field_mod(cache, index, is_static);
    load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

    Label notVolatile, Done;
    __ movl(rdx, flags);
__ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift); // shift the volatile flag bit down to bit 0; if the field is volatile we fall through to the barrier branch
    __ andl(rdx, 0x1);

    // Check for volatile store
    __ testl(rdx, rdx);
    __ jcc(Assembler::zero, notVolatile);

    putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);
    volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
    Assembler::StoreStore));
    __ jmp(Done);
    __ bind(notVolatile);

    putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags);

    __ bind(Done);
    }
Here is the concrete barrier code:

    // ----------------------------------------------------------------------------
    // Volatile variables demand their effects be made known to all CPU's
    // in order. Store buffers on most chips allow reads & writes to
    // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
    // without some kind of memory barrier (i.e., it's not sufficient that
    // the interpreter does not reorder volatile references, the hardware
    // also must not reorder them).
    //
    // According to the new Java Memory Model (JMM):
    // (1) All volatiles are serialized wrt to each other. ALSO reads &
    // writes act as aquire & release, so:
    // (2) A read cannot let unrelated NON-volatile memory refs that
    // happen after the read float up to before the read. It's OK for
    // non-volatile memory refs that happen before the volatile read to
    // float down below it.
    // (3) Similar a volatile write cannot let unrelated NON-volatile
    // memory refs that happen BEFORE the write float down to after the
    // write. It's OK for non-volatile memory refs that happen after the
    // volatile write to float up before it.
    //
    // We only put in barriers around volatile refs (they are expensive),
    // not _between_ memory refs (that would require us to track the
    // flavor of the previous memory refs). Requirements (2) and (3)
    // require some barriers before volatile stores and after volatile
    // loads. These nearly cover requirement (1) but miss the
    // volatile-store-volatile-load case. This final case is placed after
    // volatile-stores although it could just as well go before
    // volatile-loads.

    void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
    // Helper function to insert a is-volatile test and memory barrier
    __ membar(order_constraint);
    }
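At the Java level, the StoreLoad|StoreStore barrier emitted after a volatile store is what backs the usual visibility guarantee. A small hand-written example (mine, not from the JDK sources):

public class VolatileFlagDemo {
    // The 'v' bit described above is set for this field, so putstatic on it
    // takes the volatile branch of putfield_or_static and emits the barrier.
    static volatile boolean ready = false;
    static int payload = 0;

    public static void main(String[] args) throws InterruptedException {
        Thread reader = new Thread(() -> {
            while (!ready) { /* spin until the writer publishes */ }
            System.out.println(payload);   // guaranteed to print 42
        });
        reader.start();

        payload = 42;   // ordinary store
        ready = true;   // volatile store: may not be reordered before the payload store

        reader.join();
    }
}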
Back in the JVM sources, there is also this part:

The ConstantPoolCache looks like data read from the .class file at runtime, and it carries the volatile flag bit. Let's see where the value behind ConstantPoolCacheEntry::is_volatile_shift is set.

    // The ConstantPoolCache is not a cache! It is the resolution table that the
    // interpreter uses to avoid going into the runtime and a way to access resolved
    // values.

    // A ConstantPoolCacheEntry describes an individual entry of the constant
    // pool cache. There's 2 principal kinds of entries: field entries for in-
    // stance & static field access, and method entries for invokes. Some of
    // the entry layout is shared and looks as follows:
    //
    // bit number |31 0|
    // bit length |-8--|-8--|---16----|
    // --------------------------------
    // _indices [ b2 | b1 | index ] index = constant_pool_index
    // _f1 [ entry specific ] metadata ptr (method or klass)
    // _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr
    // _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
    // bit length [ 4 |1| 1 |1|1|1|1|1|1 |1 |-3-|----16-----]
    // _flags [tos|0|F=0|S|A|I|f|0|vf|indy_rf|000|00000|psize] (for method entries)
    // bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]

    // --------------------------------
    //
    // with:
    // index = original constant pool index
    // b1 = bytecode 1
    // b2 = bytecode 2
    // psize = parameters size (method entries only)
    // field_index = index into field information in holder InstanceKlass
    // The index max is 0xffff (max number of fields in constant pool)
    // and is multiplied by (InstanceKlass::next_offset) when accessing.
    // tos = TosState
    // F = the entry is for a field (or F=0 for a method)
    // A = call site has an appendix argument (loaded from resolved references)
    // I = interface call is forced virtual (must use a vtable index or vfinal)
    // f = field or method is final
    // v = field is volatile
    // vf = virtual but final (method entries only: is_vfinal())
    // indy_rf = call site specifier method resolution failed
    //
    // The flags after TosState have the following interpretation:
    // bit 27: 0 for fields, 1 for methods
    // f flag true if field is marked final
    // v flag true if field is volatile (only for fields)
    // f2 flag true if f2 contains an oop (e.g., virtual final method)
    // fv flag true if invokeinterface used for method in class Object
    //
    // The flags 31, 30, 29, 28 together build a 4 bit number 0 to 16 with the
    // following mapping to the TosState states:
    //
    // btos: 0
    // ztos: 1
    // ctos: 2
    // stos: 3
    // itos: 4
    // ltos: 5
    // ftos: 6
    // dtos: 7
    // atos: 8
    // vtos: 9
    //
    // Entry specific: field entries:
    // _indices = get (b1 section) and put (b2 section) bytecodes, original constant pool index
    // _f1 = field holder (as a java.lang.Class, not a Klass*)
    // _f2 = field offset in bytes
    // _flags = field type information, original FieldInfo index in field holder
    // (field_index section)
    //
    // Entry specific: method entries:
    // _indices = invoke code for f1 (b1 section), invoke code for f2 (b2 section),
    // original constant pool index
    // _f1 = Method* for non-virtual calls, unused by virtual calls.
    // for interface calls, which are essentially virtual but need a klass,
    // contains Klass* for the corresponding interface.
    // for invokedynamic and invokehandle, f1 contains the adapter method which
    // manages the actual call. The appendix is stored in the ConstantPool
    // resolved_references array.
    // (upcoming metadata changes will move the appendix to a separate array)
    // _f2 = vtable/itable index (or final Method*) for virtual calls only,
    // unused by non-virtual. The is_vfinal flag indicates this is a
    // method pointer for a final method, not an index.
    // _flags = has local signature (MHs and indy),
    // virtual final bit (vfinal),
    // parameter size (psize section)
    //
    // Note: invokevirtual & invokespecial bytecodes can share the same constant
    // pool entry and thus the same constant pool cache entry. All invoke
    // bytecodes but invokevirtual use only _f1 and the corresponding b1
    // bytecode, while invokevirtual uses only _f2 and the corresponding
    // b2 bytecode. The value of _flags is shared for both types of entries.
    //
    // The fields are volatile so that they are stored in the order written in the
    // source code. The _indices field with the bytecode must be written last.

    class CallInfo;

    class ConstantPoolCacheEntry {
Next, here is where the value is set:

// source location: src/hotspot/share/oops/cpCache.cpp

    // Note that concurrent update of both bytecodes can leave one of them
    // reset to zero. This is harmless; the interpreter will simply re-resolve
    // the damaged entry. More seriously, the memory synchronization is needed
    // to flush other fields (f1, f2) completely to memory before the bytecodes
    // are updated, lest other processors see a non-zero bytecode but zero f1/f2.
    void ConstantPoolCacheEntry::set_field(Bytecodes::Code get_code,
    Bytecodes::Code put_code,
    Klass* field_holder,
    int field_index,
    int field_offset,
    TosState field_type,
    bool is_final,
    bool is_volatile) {
    set_f1(field_holder);
    set_f2(field_offset);
    assert((field_index & field_index_mask) == field_index,
    "field index does not fit in low flag bits");
    set_field_flags(field_type,
((is_volatile ? 1 : 0) << is_volatile_shift) | // <--- the volatile flag is set here
    ((is_final ? 1 : 0) << is_final_shift),
    field_index);
    set_bytecode_1(get_code);
    set_bytecode_2(put_code);
    NOT_PRODUCT(verify(tty));
    }
Then debug it with gdb:

    (gdb) bt
    #0 b ::set_field (this=0x7fffb4151b50, get_code=Bytecodes::_nop, put_code=Bytecodes::_nop, field_holder=0x800042fa0, field_index=2, field_offset=120, field_type=atos,
    is_final=true, is_volatile=false) at /home/dai/jdk/src/hotspot/share/oops/cpCache.cpp:139
    #1 0x00007ffff65d2561 in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:708
    #2 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
    #3 0x00007fffe10203e3 in ?? ()
    #4 0x00007ffff7bcc0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #5 0x00007fffe1020362 in ?? ()
    #6 0x0000000000000000 in ?? ()
Let's see where the ConstantPoolCacheEntry is fetched from.

    // Helper class to access current interpreter state
    class LastFrameAccessor : public StackObj {
    frame _last_frame;
    public:
    ...
    ConstantPoolCacheEntry* cache_entry_at(int i) const
    { return method()->constants()->cache()->entry_at(i); }
    ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
    }
    (gdb) p m->print()
    {method}
    - this oop: 0x00007fffd14108e8
    - method holder: 'java/lang/String'
    - constants: 0x00007fffd14013b8 constant pool [1396]/operands[28] {0x00007fffd14013b8} for 'java/lang/String' cache=0x00007fffd1544158
    - access: 0x8 static
    - name: '<clinit>'
    - signature: '()V'
    - max stack: 3
    - max locals: 0
    - size of params: 0
    - method size: 13
    - vtable index: -2
    - i2i entry: 0x00007fffe100dbe0
    - adapters: AHE@0x00007ffff009b550: 0x i2c: 0x00007fffe1115060 c2i: 0x00007fffe111510d c2iUV: 0x00007fffe11150e0 c2iNCI: 0x00007fffe111514a
    - compiled entry 0x00007fffe111510d
    - code size: 22
    - code start: 0x00007fffd14108c0
    - code end (excl): 0x00007fffd14108d6
    - checked ex length: 0
    - linenumber start: 0x00007fffd14108d6
    - localvar length: 0
    $7 = void
    (gdb) bt
    #0 frame::interpreter_frame_method (this=0x7ffff5aa43f8) at /home/ubuntu/daixiao/jdk/src/hotspot/share/runtime/frame.cpp:332
    #1 0x00007ffff672253c in LastFrameAccessor::method (this=0x7ffff5aa43f0) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:90
    #2 0x00007ffff671d2fb in InterpreterRuntime::resolve_get_put (current=0x7ffff0028940, bytecode=Bytecodes::_putstatic) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:650
    #3 0x00007ffff671ec81 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028940, bytecode=Bytecodes::_putstatic) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:948
    #4 0x00007fffe10203a3 in ?? ()
    #5 0x00007ffff7d4b280 in TemplateInterpreter::_active_table () from /home/ubuntu/daixiao/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #6 0x00007fffe1020322 in ?? ()
    #7 0x0000000000000001 in ?? ()
    #8 0x00007ffff5aa44d8 in ?? ()
    #9 0x00007fffd14108c1 in ?? ()
    #10 0x00007ffff5aa4528 in ?? ()
    #11 0x00007fffd1544158 in ?? ()
    #12 0x0000000000000000 in ?? ()
    (gdb)
Resolving the cache

    InterpreterRuntime::resolve_from_cache

    LinkResolver::resolve_field_access
The relevant class is in:

    //   src/hotspot/share/oops/cpCache.cpp
// header file: src/hotspot/share/oops/cpCache.hpp
    ConstantPoolCacheEntry
I asked ChatGPT:

In the HotSpot JVM, ConstantPoolCacheEntry objects are created dynamically while the class file is parsed. Specifically, during class file parsing the JVM creates a corresponding ConstantPoolCacheEntry object for each constant in the class file and adds it to the class's constant pool cache.

The instantiation of ConstantPoolCacheEntry objects is mainly done in the parse_constant_pool_entry() function of the ClassFileParser class. While parsing the constant pool, parse_constant_pool_entry() dynamically creates a corresponding ConstantPoolCacheEntry object based on the constant's type and value and adds it to the class's constant pool cache.

Note that during parsing the JVM may use placeholder objects to represent invalid or unknown constants instead of creating ConstantPoolCacheEntry objects. In that case the JVM adds the placeholder to the constant pool cache so that invalid or unknown constants can be skipped quickly during resolution and execution.

In short, in the HotSpot JVM, ConstantPoolCacheEntry objects are instantiated while the class file is parsed, and the concrete implementation is in ClassFileParser's parse_constant_pool_entry() function.

Checking the stack

    (gdb) p stream->_source
    $4 = 0x7ffff00bdd40 "/home/ubuntu/daixiao/jdk/build/linux-x86_64-server-slowdebug/jdk/modules/java.base"
    (gdb) bt
    #0 ClassFileParser::parse_constant_pool_entries (this=0x7ffff0028940, stream=0x0, cp=0x7ffff5fadff1 <Thread::as_Java_thread()+39>, length=32767, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:150
    #1 0x00007ffff6378b5e in ClassFileParser::parse_constant_pool (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, cp=0x7fffd169b1c8, length=31, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:424
    #2 0x00007ffff638aacc in ClassFileParser::parse_stream (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:5720
    #3 0x00007ffff638a2b1 in ClassFileParser::ClassFileParser (this=0x7ffff5aa27e0, stream=0x7ffff00298e0, name=0x7fffb005b0a0, loader_data=0x7ffff0091890, cl_info=0x7ffff5aa2a10, pub_level=ClassFileParser::BROADCAST, __the_thread__=0x7ffff0028940)
    at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classFileParser.cpp:5590
    #4 0x00007ffff69d5f1f in KlassFactory::create_from_stream (stream=0x7ffff00298e0, name=0x7fffb005b0a0, loader_data=0x7ffff0091890, cl_info=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/klassFactory.cpp:199
    #5 0x00007ffff639a9e5 in ClassLoader::load_class (name=0x7fffb005b0a0, search_append_only=false, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/classLoader.cpp:1222
    #6 0x00007ffff6e97300 in SystemDictionary::load_instance_class_impl (class_name=0x7fffb005b0a0, class_loader=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1290
    #7 0x00007ffff6e976d1 in SystemDictionary::load_instance_class (name_hash=1923324215, name=0x7fffb005b0a0, class_loader=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1356
    #8 0x00007ffff6e95874 in SystemDictionary::resolve_instance_class_or_null (name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:724
    #9 0x00007ffff6e94481 in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:295
    #10 0x00007ffff6e94330 in SystemDictionary::resolve_or_null (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:278
    #11 0x00007ffff6e94273 in SystemDictionary::resolve_or_fail (class_name=0x7fffb005b0a0, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/systemDictionary.cpp:264
    #12 0x00007ffff64314d1 in ConstantPool::klass_at_impl (this_cp=..., which=506, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/oops/constantPool.cpp:512
    #13 0x00007ffff62aca88 in ConstantPool::klass_at (this=0x7fffd1692c38, which=506, __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/oops/constantPool.hpp:420
    #14 0x00007ffff671af3a in InterpreterRuntime::_new (current=0x7ffff0028940, pool=0x7fffd1692c38, index=506) at /home/ubuntu/daixiao/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:219
    #15 0x00007fffe1023b92 in ?? ()
    #16 0x00007fffe1023b06 in ?? ()
    #17 0x00007ffff5aa3368 in ?? ()
    #18 0x00007fffd1699fe1 in ?? ()
    #19 0x00007ffff5aa33b8 in ?? ()
    #20 0x00007fffd169a0a0 in ?? ()
    #21 0x0000000000000000 in ?? ()
Setting a breakpoint

    (gdb) commands
    Type commands for breakpoint(s) 5, one per line.
    End with a line saying just "end".
    >p name->print()
    >c
    >end
The printed output:

    Thread 2 "java" hit Breakpoint 5, KlassFactory::create_from_stream (stream=0x7ffff5aa2f20, name=0x7ffff044c4a0, loader_data=0x7ffff03ddf90, cl_info=..., __the_thread__=0x7ffff0028940) at /home/ubuntu/daixiao/jdk/src/hotspot/share/classfile/klassFactory.cpp:172
    172 assert(loader_data != NULL, "invariant");
    Symbol: 'com/HelloWorld' count 2$1035 = void
Class loading

    $801 = 0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/"
    (gdb) bt
    #0 KlassFactory::create_from_stream (stream=0x7ffff59fce00, name=0x7ffff057a9d0, loader_data=0x7ffff05185c0, cl_info=..., __the_thread__=0x7ffff0028f50)
    at /home/dai/jdk/src/hotspot/share/classfile/klassFactory.cpp:179
    #1 0x00007ffff6d4248e in SystemDictionary::resolve_class_from_stream (st=0x7ffff59fce00, class_name=0x7ffff057a9d0, class_loader=..., cl_info=..., __the_thread__=0x7ffff0028f50)
    at /home/dai/jdk/src/hotspot/share/classfile/systemDictionary.cpp:914
    #2 0x00007ffff6d42708 in SystemDictionary::resolve_from_stream (st=0x7ffff59fce00, class_name=0x7ffff057a9d0, class_loader=..., cl_info=..., __the_thread__=0x7ffff0028f50)
    at /home/dai/jdk/src/hotspot/share/classfile/systemDictionary.cpp:952
    #3 0x00007ffff66e1b51 in jvm_define_class_common (name=0x7ffff59fd030 "Hello", loader=0x7ffff59fd5a0, buf=0x7ffff04a4b90 "\312\376\272\276", len=409, pd=0x7ffff59fd578,
    source=0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/", __the_thread__=0x7ffff0028f50) at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:883
    #4 0x00007ffff66e2832 in JVM_DefineClassWithSource (env=0x7ffff0029230, name=0x7ffff59fd030 "Hello", loader=0x7ffff59fd5a0, buf=0x7ffff04a4b90 "\312\376\272\276", len=409, pd=0x7ffff59fd578,
    source=0x7ffff59fd0b0 "file:/home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/bin/") at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:1047
    #5 0x00007ffff58db1f5 in Java_java_lang_ClassLoader_defineClass1 (env=0x7ffff0029230, cls=0x7ffff59fd560, loader=0x7ffff59fd5a0, name=0x7ffff59fd598, data=0x7ffff59fd590, offset=0, length=409,
    pd=0x7ffff59fd578, source=0x7ffff59fd570) at /home/dai/jdk/src/java.base/share/native/libjava/ClassLoader.c:132
    #6 0x00007fffe100f6cb in ?? ()
    #7 0x0000000000000199 in ?? ()
    #8 0x00007ffff59fd578 in ?? ()
    #9 0x00007ffff59fd570 in ?? ()
    #10 0x0000555555581230 in ?? ()
    #11 0x00007ffff0028f50 in ?? ()
    #12 0x00007fffb445ba08 in ?? ()
    #13 0x00007fffe100f199 in ?? ()
    #14 0x00007ffff59fd508 in ?? ()
    #15 0x00007fffb4025170 in ?? ()
    #16 0x00007ffff59fd5a0 in ?? ()
    #17 0x00007fffb4147588 in ?? ()
    #18 0x0000000000000000 in ?? ()
How the FieldInfo / fieldDescriptor is initialized

The ConstantPoolCacheEntry is filled in from fields of a fieldDescriptor, so let's look at how the fieldDescriptor itself is initialized.

      fieldDescriptor info;
    ...
    void InterpreterRuntime::resolve_get_put(JavaThread* current, Bytecodes::Code bytecode) {
    ...

    LinkResolver::resolve_field_access(info, pool, last_frame.get_index_u2_cpcache(bytecode),
    m, bytecode, CHECK);
    ...


    }
We finally find where it is initialized:

    bool InstanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
    for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
    Symbol* f_name = fs.name();
    Symbol* f_sig = fs.signature();
    if (f_name == name && f_sig == sig) {
fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index()); // this is where the initialization happens
    return true;
    }
    }
    return false;
    }
The initialization itself is right here: fieldDescriptor::reinitialize

    void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
    ...
    FieldInfo* f = ik->field(index);
    _access_flags = accessFlags_from(f->access_flags());
    guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
    _index = index;
    ...
    }
JVM stack frame structure

    inline frame ContinuationEntry::to_frame() const {
    static CodeBlob* cb = CodeCache::find_blob_fast(entry_pc());
    assert(cb != nullptr, "");
    assert(cb->as_compiled_method()->method()->is_continuation_enter_intrinsic(), "");
    return frame(entry_sp(), entry_sp(), entry_fp(), entry_pc(), cb);
    }
    (gdb) bt
    #0 frame::frame (this=0x7ffff59fe348) at /home/dai/jdk/src/hotspot/cpu/x86/frame_x86.inline.hpp:37
    #1 0x00007ffff65d76f1 in LastFrameAccessor::LastFrameAccessor (this=0x7ffff59fe340, current=0x7ffff0028f70)
    at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:84
    #2 0x00007ffff65d212e in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70,
    bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:652
    #3 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70,
    bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
    #4 0x00007fffe10203e3 in ?? ()
    #5 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table ()
    from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #6 0x00007fffe1020362 in ?? ()
    #7 0x0000000000000001 in ?? ()
    #8 0x00007ffff59fe438 in ?? ()
    #9 0x00007fffb4010821 in ?? ()
    #10 0x00007ffff59fe488 in ?? ()
    #11 0x00007fffb4149b38 in ?? ()
    #12 0x0000000000000000 in ?? ()
Further on we can see that the frame is constructed here:

    frame JavaThread::pd_last_frame() {
    assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
    vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
    return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
    }
frame initialization

    (gdb) bt
    #0 JavaThread::pd_last_frame (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp:30
    #1 0x00007ffff612888b in JavaThread::last_frame (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/runtime/thread.hpp:1407
    #2 0x00007ffff65d7757 in LastFrameAccessor::LastFrameAccessor (this=0x7ffff59fe340, current=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:86
    #3 0x00007ffff65d212e in InterpreterRuntime::resolve_get_put (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:652
    #4 0x00007ffff65d3e14 in InterpreterRuntime::resolve_from_cache (current=0x7ffff0028f70, bytecode=Bytecodes::_putstatic) at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:959
    #5 0x00007fffe10203e3 in ?? ()
    #6 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #7 0x00007fffe1020362 in ?? ()
    #8 0x0000000000000001 in ?? ()
    #9 0x00007ffff59fe438 in ?? ()
    #10 0x00007fffb4010821 in ?? ()
    #11 0x00007ffff59fe488 in ?? ()
    #12 0x00007fffb4149b38 in ?? ()
    #13 0x0000000000000000 in ?? ()
    (gdb) list
    35 // For Forte Analyzer AsyncGetCallTrace profiling support - thread is
    36 // currently interrupted by SIGPROF
    37 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
    38 void* ucontext, bool isInJava) {
    39
    40 assert(Thread::current() == this, "caller must be current thread");
    41 return pd_get_top_frame(fr_addr, ucontext, isInJava);
    42 }
    43
    44 bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
    (gdb) info registers
    rax 0x7ffff59fe220 140737314284064
    rbx 0x7ffff59fe340 140737314284352
    rcx 0x7ffff00153f0 140737220006896
    rdx 0x7ffff0028f70 140737220087664
    rsi 0x7ffff0028f70 140737220087664
    rdi 0x7ffff59fe220 140737314284064
    rbp 0x7ffff59fe1f0 0x7ffff59fe1f0
    rsp 0x7ffff59fe1d0 0x7ffff59fe1d0
    r8 0x8 8
    r9 0x0 0
    r10 0x7ffff7bca0a0 140737349722272
    r11 0x7ffff0000090 140737219920016
    r12 0x0 0
    r13 0x7fffb4010821 140736213354529
    r14 0x7ffff59fe488 140737314284680
    r15 0x7ffff0028f70 140737220087664
    rip 0x7ffff6da2ea3 0x7ffff6da2ea3 <JavaThread::pd_last_frame()+23>
    eflags 0x202 [ IF ]
    cs 0x33 51
    ss 0x2b 43
    ds 0x0 0
    es 0x0 0
    fs 0x0 0
    gs 0x0 0

Then we look at the JavaThread field:

JavaFrameAnchor _anchor;

    class JavaFrameAnchor {
    ...

    private:
    ...
    intptr_t* volatile _last_Java_sp; //stack_pointer
    ...
    }
JavaFrameAnchor initialization

    JavaFrameAnchor::clear (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp:44
    44 _last_Java_fp = NULL;
    (gdb) bt
    #0 JavaFrameAnchor::clear (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/cpu/x86/javaFrameAnchor_x86.hpp:44
    #1 0x00007ffff65df910 in JavaFrameAnchor::JavaFrameAnchor (this=0x7ffff0029230) at /home/dai/jdk/src/hotspot/share/runtime/javaFrameAnchor.hpp:88
    #2 0x00007ffff6d8730c in JavaThread::JavaThread (this=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:1076
    #3 0x00007ffff6d8d0ee in Threads::create_vm (args=0x7ffff59fed50, canTryAgain=0x7ffff59fec5b) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2817
    #4 0x00007ffff66b243b in JNI_CreateJavaVM_inner (vm=0x7ffff59feda8, penv=0x7ffff59fedb0, args=0x7ffff59fed50) at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3613
    #5 0x00007ffff66b2787 in JNI_CreateJavaVM (vm=0x7ffff59feda8, penv=0x7ffff59fedb0, args=0x7ffff59fed50) at /home/dai/jdk/src/hotspot/share/prims/jni.cpp:3701
    #6 0x00007ffff7faca6a in InitializeJVM (pvm=0x7ffff59feda8, penv=0x7ffff59fedb0, ifn=0x7ffff59fee00) at /home/dai/jdk/src/java.base/share/native/libjli/java.c:1459
    #7 0x00007ffff7fa95ec in JavaMain (_args=0x7fffffffa9a0) at /home/dai/jdk/src/java.base/share/native/libjli/java.c:411
    #8 0x00007ffff7fb05ec in ThreadJavaMain (args=0x7fffffffa9a0) at /home/dai/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #9 0x00007ffff7c94b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #10 0x00007ffff7d26a00 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81

last_java_sp is set here:

    void MacroAssembler::call_VM_base(Register oop_result,
    Register java_thread,
    Register last_java_sp,
    address entry_point,
    int number_of_arguments,
    bool check_exceptions) {
    // determine java_thread register
    if (!java_thread->is_valid()) {
    #ifdef _LP64
    java_thread = r15_thread;
    #else
    java_thread = rdi;
    get_thread(java_thread);
    #endif // LP64
    }
    // determine last_java_sp register
    if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
    }
    // debugging support
    assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
    LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
    #ifdef ASSERT
    // TraceBytecodes does not use r12 but saves it over the call, so don't verify
    // r12 is the heapbase.
    LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
    #endif // ASSERT

    assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
    assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

    // push java thread (becomes first argument of C function)

    NOT_LP64(push(java_thread); number_of_arguments++);
    LP64_ONLY(mov(c_rarg0, r15_thread));

    // set last Java frame before call
    assert(last_java_sp != rbp, "can't use ebp/rbp");

    // Only interpreter should have to set fp
    set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

    // do the call, remove parameters
    MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

    // restore the thread (cannot use the pushed argument since arguments
    // may be overwritten by C code generated by an optimizing compiler);
    // however can use the register value directly if it is callee saved.
    if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
    #ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
    get_thread(rax);
    cmpptr(java_thread, rax);
    jcc(Assembler::equal, L);
    STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
    bind(L);
    }
    pop(rax);
    #endif
    } else {
    get_thread(java_thread);
    }
    // reset last Java frame
    // Only interpreter should have to clear fp
    reset_last_Java_frame(java_thread, true);

    // C++ interp handles this in the interpreter
    check_and_handle_popframe(java_thread);
    check_and_handle_earlyret(java_thread);

    if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    #ifndef _LP64
    jump_cc(Assembler::notEqual,
    RuntimeAddress(StubRoutines::forward_exception_entry()));
    #else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
    #endif // LP64
    }

    // get oop result if there is one and reset the value in the thread
    if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
    }
    }
The volatile flag bit
    ///home/dai/jdk/src/hotspot/share/utilities/accessFlags.hpp
    bool is_volatile () const { return (_flags & JVM_ACC_VOLATILE ) != 0; }
Related reading


Background

Understand the rules of Java integer division.

JLS description
    Integer division rounds toward 0. That is, the quotient produced for operands n
    and d that are integers after binary numeric promotion (§5.6) is an integer value q
    whose magnitude is as large as possible while satisfying |d ⋅ q| ≤ |n|. Moreover, q
    is positive when |n| ≥ |d| and n and d have the same sign, but q is negative when
    |n| ≥ |d| and n and d have opposite signs.
In other words:

int res = 3 / 5;   // res == 0
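A quick hand-written check of the rounding rule, including the one overflow corner case the JLS calls out for integer division (Integer.MIN_VALUE / -1):

public class IntDivDemo {
    public static void main(String[] args) {
        System.out.println(3 / 5);                    // 0
        System.out.println(-3 / 5);                   // 0   (rounds toward zero, not toward -infinity)
        System.out.println(7 / -2);                   // -3
        System.out.println(Math.floorDiv(-3, 5));     // -1  (floor division, for contrast)
        System.out.println(Integer.MIN_VALUE / -1);   // -2147483648 (overflow wraps, no exception)
    }
}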
The bytecode ultimately used is idiv.

Let's see how the idiv bytecode is implemented:

JVM implementation

    void Assembler::idivl(Register src) {
    int encode = prefix_and_encode(src->encoding());
    emit_int16((unsigned char)0xF7, (0xF8 | encode));
    }

    void Assembler::divl(Register src) { // Unsigned
    int encode = prefix_and_encode(src->encoding());
    emit_int16((unsigned char)0xF7, (0xF0 | encode));
    }
Check the Intel manual:
intel

    2.1.5 Addressing-Mode Encoding of ModR/M and SIB Bytes
    The values and corresponding addressing forms of the ModR/M and SIB bytes are shown in Table 2-1 through Table
    2-3: 16-bit addressing forms specified by the ModR/M byte are in Table 2-1 and 32-bit addressing forms are in
    Table 2-2. Table 2-3 shows 32-bit addressing forms specified by the SIB byte. In cases where the reg/opcode field
    in the ModR/M byte represents an extended opcode, valid encodings are shown in Appendix B.
    In Table 2-1 and Table 2-2, the Effective Address column lists 32 effective addresses that can be assigned to the
    first operand of an instruction by using the Mod and R/M fields of the ModR/M byte. The first 24 options provide
    ways of specifying a memory location; the last eight (Mod = 11B) provide ways of specifying general-purpose, MMX
    technology and XMM registers.
    The Mod and R/M columns in Table 2-1 and Table 2-2 give the binary encodings of the Mod and R/M fields required
    to obtain the effective address listed in the first column. For example: see the row indicated by Mod = 11B, R/M =
    000B. The row identifies the general-purpose registers EAX, AX or AL; MMX technology register MM0; or XMM
    register XMM0. The register used is determined by the opcode byte and the operand-size attribute.
    Now look at the seventh row in either table (labeled “REG =”). This row specifies the use of the 3-bit Reg/Opcode
    field when the field is used to give the location of a second operand. The second operand must be a generalpurpose, MMX technology, or XMM register. Rows one through five list the registers that may correspond to the
    value in the table. Again, the register used is determined by the opcode byte along with the operand-size attribute.
    If the instruction does not require a second operand, then the Reg/Opcode field may be used as an opcode extension. This use is represented by the sixth row in the tables (labeled “/digit (Opcode)”). Note that values in row six
    are represented in decimal form.
    The body of Table 2-1 and Table 2-2 (under the label “Value of ModR/M Byte (in Hexadecimal)”) contains a 32 by
    8 array that presents all of 256 values of the ModR/M byte (in hexadecimal). Bits 3, 4 and 5 are specified by the
    column of the table in which a byte resides. The row specifies bits 0, 1 and 2; and bits 6 and 7. The figure below
    demonstrates interpretation of one table value.
(figure: register encodings)
Registers are encoded from 0 through 15, in the order shown above.

Example
    int main(void){                        
    int num0 = 5;
    int num1 = 2;
    int num = num0 / num1 ;
    return num;
    }
## compile
gcc -O0 test.c -o test

## list the assembly with objdump
objdump -d test | grep -A20 '<main>:'

The result:
    0000000000001129 <main>:
    1129: f3 0f 1e fa endbr64
    112d: 55 push %rbp
    112e: 48 89 e5 mov %rsp,%rbp
    1131: c7 45 f4 05 00 00 00 movl $0x5,-0xc(%rbp)
    1138: c7 45 f8 02 00 00 00 movl $0x2,-0x8(%rbp)
    113f: 8b 45 f4 mov -0xc(%rbp),%eax
    1142: 99 cltd
    1143: f7 7d f8 idivl -0x8(%rbp)
    1146: 89 45 fc mov %eax,-0x4(%rbp)
    1149: 8b 45 fc mov -0x4(%rbp),%eax
    114c: 5d pop %rbp
    114d: c3 retq
    114e: 66 90 xchg %ax,%ax
Related reading


Background

Understand the startup flow of RabbitMQ in a Java Spring Boot application.
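The trace below is produced while a @Configuration class declares an exchange during context refresh. Roughly, the configuration looks like this hand-written sketch (the exchange name is invented; the bean method name mirrors the cdpOrderTopicExchange frame in the trace):

import org.springframework.amqp.core.TopicExchange;
import org.springframework.amqp.rabbit.core.RabbitAdmin;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RabbitConfig {
    @Bean
    public TopicExchange cdpOrderTopicExchange(RabbitAdmin rabbitAdmin) {
        TopicExchange exchange = new TopicExchange("cdp.order.topic"); // name is an assumption
        rabbitAdmin.declareExchange(exchange);   // shows up as declareExchange in the trace below
        return exchange;
    }
}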

Stack trace

    declareQueues:700, RabbitAdmin (org.springframework.amqp.rabbit.core)
    lambda$initialize$12:606, RabbitAdmin (org.springframework.amqp.rabbit.core)
    doInRabbit:-1, 1826902085 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$965)
    invokeAction:2151, RabbitTemplate (org.springframework.amqp.rabbit.core)
    doExecute:2110, RabbitTemplate (org.springframework.amqp.rabbit.core)
    execute:2062, RabbitTemplate (org.springframework.amqp.rabbit.core)
    execute:2042, RabbitTemplate (org.springframework.amqp.rabbit.core)
    initialize:604, RabbitAdmin (org.springframework.amqp.rabbit.core)
    lambda$null$10:532, RabbitAdmin (org.springframework.amqp.rabbit.core)
    doWithRetry:-1, 999782961 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$957)
    doExecute:287, RetryTemplate (org.springframework.retry.support)
    execute:164, RetryTemplate (org.springframework.retry.support)
    lambda$afterPropertiesSet$11:531, RabbitAdmin (org.springframework.amqp.rabbit.core)
    onCreate:-1, 1185831500 (org.springframework.amqp.rabbit.core.RabbitAdmin$$Lambda$950)
    lambda$onCreate$0:38, CompositeConnectionListener (org.springframework.amqp.rabbit.connection)
    accept:-1, 1588281004 (org.springframework.amqp.rabbit.connection.CompositeConnectionListener$$Lambda$956)
    forEach:803, CopyOnWriteArrayList (java.util.concurrent)
    onCreate:38, CompositeConnectionListener (org.springframework.amqp.rabbit.connection)
    createConnection:757, CachingConnectionFactory (org.springframework.amqp.rabbit.connection)
    createConnection:216, ConnectionFactoryUtils (org.springframework.amqp.rabbit.connection)
    doExecute:2089, RabbitTemplate (org.springframework.amqp.rabbit.core)
    execute:2062, RabbitTemplate (org.springframework.amqp.rabbit.core)
    execute:2042, RabbitTemplate (org.springframework.amqp.rabbit.core)
    declareExchange:221, RabbitAdmin (org.springframework.amqp.rabbit.core)
    cdpOrderTopicExchange:27, RabbitConfig (com.patpat.mms.mdp.base.core.rest.config)
    CGLIB$cdpOrderTopicExchange$7:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353 (com.patpat.mms.mdp.base.core.rest.config)
    invoke:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353$$FastClassBySpringCGLIB$$bdc910b3 (com.patpat.mms.mdp.base.core.rest.config)
    invokeSuper:244, MethodProxy (org.springframework.cglib.proxy)
    intercept:331, ConfigurationClassEnhancer$BeanMethodInterceptor (org.springframework.context.annotation)
    cdpOrderTopicExchange:-1, RabbitConfig$$EnhancerBySpringCGLIB$$65dbc353 (com.patpat.mms.mdp.base.core.rest.config)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 1735872041 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$295)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Background

Get familiar with Kafka by building it from source and starting it locally.

Build

## Clone the code
git clone https://github.com/apache/kafka.git
## Change into the directory
cd kafka/
## Build and package
./gradlew jar


After the build finishes, start ZooKeeper and Kafka:

## Start ZooKeeper in one terminal
bin/zookeeper-server-start.sh config/zookeeper.properties

## Start Kafka in another terminal
bin/kafka-server-start.sh config/server.properties
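Once both processes are running, a quick smoke test is to send a message from a small producer. This is only a sketch and assumes the default localhost:9092 listener and a topic named test (neither is taken from the original notes):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Send one record and wait for the broker to acknowledge it.
            producer.send(new ProducerRecord<>("test", "key", "hello kafka"));
            producer.flush();
        }
    }
}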

Background

GitHub repository
Arroyo is a distributed stream processing engine written in Rust. I wanted to try it out, so I wrote down these build notes.

Build

• Install Rust

  curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

• Clone the code

  git clone https://github.com/ArroyoSystems/arroyo.git

• Install PostgreSQL

  sudo apt install postgresql

Configuration file path:

/etc/postgresql/14/main/pg_hba.conf
## Connect to postgresql
sudo -u postgres psql
## Create the arroyo database
create database arroyo;
## Run the SQL from this path
source arroyo/arroyo-api/migrations/V1__initial.sql

## Create the arroyo user
create user arroyo with password 'arroyo';

## Restart postgresql
sudo systemctl restart postgresql.service
• Build

  ## Change into the directory
  cd arroyo
  ## Build
  cargo build
  ## If the default build fails, use
  cargo build --no-default-features
  Build output:
      Compiling datafusion-optimizer v20.0.0
      Compiling datafusion v20.0.0
      Compiling arroyo-sql v0.1.0 (/home/dai/rust/arroyo/arroyo-sql)
      Compiling arroyo-sql-macro v0.1.0 (/home/dai/rust/arroyo/arroyo-sql-macro)
      Compiling arroyo-sql-testing v0.1.0 (/home/dai/rust/arroyo/arroyo-sql-testing)
      Finished dev [unoptimized + debuginfo] target(s) in 2m 13s


Errors and fixes

The build failed because the SSL development library was missing. My system is Ubuntu, so I ran sudo apt install libssl-dev. If the build still fails, try cargo build --no-default-features.

Further reading


Background

Lucene uses direct memory, i.e. memory that is not managed directly by the JVM heap.

DirectByteBufferR is the read-only variant of DirectByteBuffer, so the two classes are almost identical.
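For context, a read-only direct buffer of this kind is what you get back when a file is mapped read-only through FileChannel.map. A minimal sketch (the file path here is an arbitrary assumption):

import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class MmapDemo {
    public static void main(String[] args) throws Exception {
        try (FileChannel ch = FileChannel.open(Path.of("/tmp/example.bin"),
                                               StandardOpenOption.READ)) {
            // A READ_ONLY mapping is backed by java.nio.DirectByteBufferR at runtime.
            MappedByteBuffer buf = ch.map(FileChannel.MapMode.READ_ONLY, 0, ch.size());
            System.out.println(buf.getClass().getName()); // java.nio.DirectByteBufferR
            System.out.println(buf.isReadOnly());         // true
            System.out.println(buf.get(0));               // reads straight from mapped memory
        }
    }
}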

Lucene's mmap

Lucene uses DirectByteBufferR; in this example the mapped memory address is 140063879776283.

    main[1] dump receiver
    receiver = {
    $assertionsDisabled: true
    java.nio.DirectByteBuffer.ARRAY_BASE_OFFSET: 16
    java.nio.DirectByteBuffer.UNALIGNED: true
    java.nio.DirectByteBuffer.att: instance of java.nio.DirectByteBufferR(id=1500)
    java.nio.DirectByteBuffer.cleaner: null
    java.nio.DirectByteBuffer.$assertionsDisabled: true
    java.nio.MappedByteBuffer.fd: instance of java.io.FileDescriptor(id=1501)
    java.nio.MappedByteBuffer.isSync: false
    java.nio.MappedByteBuffer.SCOPED_MEMORY_ACCESS: instance of jdk.internal.misc.ScopedMemoryAccess(id=1502)
    java.nio.ByteBuffer.ARRAY_BASE_OFFSET: 16
    java.nio.ByteBuffer.hb: null
    java.nio.ByteBuffer.offset: 0
    java.nio.ByteBuffer.isReadOnly: true
    java.nio.ByteBuffer.bigEndian: false
    java.nio.ByteBuffer.nativeByteOrder: true
    java.nio.ByteBuffer.$assertionsDisabled: true
    java.nio.Buffer.UNSAFE: instance of jdk.internal.misc.Unsafe(id=1503)
    java.nio.Buffer.SCOPED_MEMORY_ACCESS: instance of jdk.internal.misc.ScopedMemoryAccess(id=1502)
    java.nio.Buffer.SPLITERATOR_CHARACTERISTICS: 16464
    java.nio.Buffer.mark: -1
    java.nio.Buffer.position: 0
    java.nio.Buffer.limit: 7
    java.nio.Buffer.capacity: 7
    java.nio.Buffer.address: 140063879776283
    java.nio.Buffer.segment: null
    java.nio.Buffer.$assertionsDisabled: true
    }
    main[1] where
    [1] org.apache.lucene.store.ByteBufferGuard.getByte (ByteBufferGuard.java:118)
    [2] org.apache.lucene.store.ByteBufferIndexInput$SingleBufferImpl.readByte (ByteBufferIndexInput.java:593)
    [3] org.apache.lucene.codecs.lucene90.Lucene90NormsProducer$3.longValue (Lucene90NormsProducer.java:388)
    [4] org.apache.lucene.search.LeafSimScorer.getNormValue (LeafSimScorer.java:47)
    [5] org.apache.lucene.search.LeafSimScorer.score (LeafSimScorer.java:60)
    [6] org.apache.lucene.search.TermScorer.score (TermScorer.java:75)
    [7] org.apache.lucene.search.TopScoreDocCollector$SimpleTopScoreDocCollector$1.collect (TopScoreDocCollector.java:73)
    [8] org.apache.lucene.search.Weight$DefaultBulkScorer.scoreAll (Weight.java:305)
    [9] org.apache.lucene.search.Weight$DefaultBulkScorer.score (Weight.java:247)
    [10] org.apache.lucene.search.BulkScorer.score (BulkScorer.java:38)
    [11] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:770)
    [12] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:693)
    [13] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:687)
    [14] org.apache.lucene.search.IndexSearcher.searchAfter (IndexSearcher.java:532)
    [15] org.apache.lucene.search.IndexSearcher.search (IndexSearcher.java:542)
    [16] org.apache.lucene.demo.SearchFiles.doPagingSearch (SearchFiles.java:180)
    [17] org.apache.lucene.demo.SearchFiles.main (SearchFiles.java:150)

Inspect the _7.cfs file:
    hexdump -C /home/dai/index/_7.cfs
    00000000 3f d7 6c 17 14 4c 75 63 65 6e 65 39 30 43 6f 6d |?.l..Lucene90Com|
    00000010 70 6f 75 6e 64 44 61 74 61 00 00 00 00 6b f0 66 |poundData....k.f|
    00000020 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 00 00 |V..[...:2MK.....|
    00000030 3f d7 6c 17 17 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
    00000040 6c 64 73 49 6e 64 65 78 4d 65 74 61 00 00 00 01 |ldsIndexMeta....|
    00000050 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 |k.fV..[...:2MK..|
    00000060 00 80 80 05 07 00 00 00 0a 00 00 00 02 00 00 00 |................|
    00000070 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |0...............|
    00000080 00 00 e0 40 00 00 00 00 00 00 00 00 00 30 00 00 |...@.........0..|
    00000090 00 00 00 00 00 36 00 00 00 00 00 00 00 00 00 5d |.....6.........]|
    000000a0 43 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 |C.........0.....|
    000000b0 00 00 13 01 00 00 00 00 00 00 01 01 07 c0 28 93 |..............(.|
    000000c0 e8 00 00 00 00 00 00 00 00 46 80 fe 32 00 00 00 |.........F..2...|
    000000d0 3f d7 6c 17 19 4c 75 63 65 6e 65 39 30 50 6f 69 |?.l..Lucene90Poi|
    000000e0 6e 74 73 46 6f 72 6d 61 74 49 6e 64 65 78 00 00 |ntsFormatIndex..|
    000000f0 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b |..k.fV..[...:2MK|
    00000100 92 f8 00 32 c0 28 93 e8 00 00 00 00 00 00 00 00 |...2.(..........|
    00000110 0a 2f 94 55 00 00 00 00 3f d7 6c 17 18 4c 75 63 |./.U....?.l..Luc|
    00000120 65 6e 65 39 30 50 6f 69 6e 74 73 46 6f 72 6d 61 |ene90PointsForma|
    00000130 74 4d 65 74 61 00 00 00 00 6b f0 66 56 c3 12 5b |tMeta....k.fV..[|
    00000140 07 08 12 3a 32 4d 4b 92 f8 00 01 00 00 00 3f d7 |...:2MK.......?.|
    00000150 6c 17 03 42 4b 44 00 00 00 09 01 01 80 04 08 01 |l..BKD..........|
    00000160 80 00 01 81 b4 f6 00 0f 80 00 01 81 b5 2b 3d 9d |.............+=.|
    00000170 07 07 01 32 00 00 00 00 00 00 00 33 00 00 00 00 |...2.......3....|
    00000180 00 00 00 ff ff ff ff 44 00 00 00 00 00 00 00 72 |.......D.......r|
    00000190 00 00 00 00 00 00 00 c0 28 93 e8 00 00 00 00 00 |........(.......|
    000001a0 00 00 00 09 71 1c 79 00 3f d7 6c 17 12 42 6c 6f |....q.y.?.l..Blo|
    000001b0 63 6b 54 72 65 65 54 65 72 6d 73 4d 65 74 61 00 |ckTreeTermsMeta.|
    000001c0 00 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d |...k.fV..[...:2M|
    000001d0 4b 92 f8 0a 4c 75 63 65 6e 65 39 30 5f 30 3f d7 |K...Lucene90_0?.|
    000001e0 6c 17 1b 4c 75 63 65 6e 65 39 30 50 6f 73 74 69 |l..Lucene90Posti|
    000001f0 6e 67 73 57 72 69 74 65 72 54 65 72 6d 73 00 00 |ngsWriterTerms..|
    00000200 00 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b |..k.fV..[...:2MK|
    00000210 92 f8 0a 4c 75 63 65 6e 65 39 30 5f 30 80 01 02 |...Lucene90_0...|
    00000220 02 8c 01 0c db 01 03 62 af 05 67 cf 09 6d 95 14 |.......b..g..m..|
    00000230 c2 02 a5 01 06 01 30 03 cd b1 69 37 3f d7 6c 17 |......0...i7?.l.|
    00000240 03 46 53 54 00 00 00 08 01 0d 14 95 6d 09 cf 67 |.FST........m..g|
    00000250 05 af 62 03 01 db 0c 00 00 01 00 07 02 a2 37 07 |..b...........7.|
    00000260 07 16 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 |../home/dai/docs|
    00000270 2f 61 61 61 2e 74 78 74 1f 2f 68 6f 6d 65 2f 64 |/aaa.txt./home/d|
    00000280 61 69 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 77 72 |ai/docs/index/wr|
    00000290 69 74 65 2e 6c 6f 63 6b 38 3f d7 6c 17 03 46 53 |ite.lock8?.l..FS|
    000002a0 54 00 00 00 08 01 03 37 a2 02 00 00 01 49 00 00 |T......7.....I..|
    000002b0 00 00 00 00 00 c9 07 00 00 00 00 00 00 c0 28 93 |..............(.|
    000002c0 e8 00 00 00 00 00 00 00 00 c1 1b ff e4 00 00 00 |................|
    000002d0 3f d7 6c 17 18 4c 75 63 65 6e 65 39 30 50 6f 69 |?.l..Lucene90Poi|
    000002e0 6e 74 73 46 6f 72 6d 61 74 44 61 74 61 00 00 00 |ntsFormatData...|
    000002f0 00 6b f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 |.k.fV..[...:2MK.|
    00000300 f8 00 07 10 00 01 00 06 00 04 00 00 00 05 00 03 |................|
    00000310 00 02 00 04 80 00 01 81 00 b4 03 f6 00 0f f6 55 |...............U|
    00000320 d3 f8 31 29 b5 04 2b 3d 81 2b 3d 81 2b 3d 85 2b |..1)..+=.+=.+=.+|
    00000330 3d 9d c0 28 93 e8 00 00 00 00 00 00 00 00 28 e1 |=..(..........(.|
    00000340 c0 de 00 00 00 00 00 00 3f d7 6c 17 19 4c 75 63 |........?.l..Luc|
    00000350 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 |ene90PostingsWri|
    00000360 74 65 72 50 6f 73 00 00 00 00 6b f0 66 56 c3 12 |terPos....k.fV..|
    00000370 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 6e |[...:2MK...Lucen|
    00000380 65 39 30 5f 30 1e 01 03 b1 01 06 0b 0b a4 01 05 |e90_0...........|
    00000390 0d 21 06 02 0b a5 01 0c 0d 0a 0e 19 18 20 09 0b |.!........... ..|
    000003a0 5f 19 45 06 30 08 0a 22 02 02 75 51 58 06 0b 03 |_.E.0.."..uQX...|
    000003b0 05 03 07 7c 05 23 96 01 02 3c 54 30 37 01 11 18 |...|.#...<T07...|
    000003c0 0a 40 30 29 0b 32 92 01 ae 01 03 1f 21 03 88 01 |.@0).2......!...|
    000003d0 23 27 d5 01 73 18 0f 5f 07 3a 04 04 06 06 07 06 |#'..s.._.:......|
    000003e0 12 19 38 04 00 72 0c 7d 52 3b 04 04 06 06 07 06 |..8..r.}R;......|
    000003f0 25 06 38 04 11 46 3e 08 4c 42 11 10 0f 1f bc 01 |%.8..F>.LB......|
    00000400 0b 1c 1a 06 8a 01 20 39 04 04 06 06 07 06 12 51 |...... 9.......Q|
    00000410 04 c8 01 15 00 7e 44 06 06 07 06 17 06 08 06 04 |.....~D.........|
    00000420 38 04 30 30 12 1d 05 07 19 06 05 02 00 05 05 06 |8.00............|
    00000430 02 07 0c 05 07 31 05 2a 06 01 06 09 06 06 08 0b |.....1.*........|
    00000440 04 00 00 1a 00 1c 0c d1 01 06 2f 07 60 07 15 06 |........../.`...|
    00000450 01 01 cb 01 63 1a 26 a8 01 9f 01 13 06 2b 99 01 |....c.&......+..|
    00000460 b4 01 01 68 28 09 d4 01 09 1b 0d 6f 0a 16 1b 10 |...h(......o....|
    00000470 17 80 01 05 71 cf 01 d0 01 06 d2 01 06 17 1e 04 |....q...........|
    00000480 05 0d 07 0c 05 07 31 05 2a 07 06 09 06 06 17 08 |......1.*.......|
    00000490 04 04 0c 04 0d 12 2a 01 25 76 0e 07 0f 20 14 1e |......*.%v... ..|
    000004a0 53 06 1e 08 a3 01 38 0a 0b a6 01 da 01 03 5e 2b |S.....8.......^+|
    000004b0 c5 01 61 18 01 ba 01 38 03 05 0d 07 0c 05 07 31 |..a....8.......1|
    000004c0 05 2a 07 06 09 06 06 17 03 04 04 03 03 02 05 0d |.*..............|
    000004d0 07 0c 05 07 31 05 2a 07 06 09 06 06 17 02 08 02 |....1.*.........|
    000004e0 02 c9 01 c0 28 93 e8 00 00 00 00 00 00 00 00 63 |....(..........c|
    000004f0 69 b5 c7 00 00 00 00 00 3f d7 6c 17 15 4c 75 63 |i.......?.l..Luc|
    00000500 65 6e 65 39 30 4e 6f 72 6d 73 4d 65 74 61 64 61 |ene90NormsMetada|
    00000510 74 61 00 00 00 00 6b f0 66 56 c3 12 5b 07 08 12 |ta....k.fV..[...|
    00000520 3a 32 4d 4b 92 f8 00 02 00 00 00 ff ff ff ff ff |:2MK............|
    00000530 ff ff ff 00 00 00 00 00 00 00 00 ff ff ff 07 00 |................|
    00000540 00 00 01 2b 00 00 00 00 00 00 00 ff ff ff ff c0 |...+............|
    00000550 28 93 e8 00 00 00 00 00 00 00 00 72 ba cc 7e 00 |(..........r..~.|
    00000560 3f d7 6c 17 12 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
    00000570 6c 64 49 6e 66 6f 73 00 00 00 00 6b f0 66 56 c3 |ldInfos....k.fV.|
    00000580 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 03 04 70 61 |.[...:2MK.....pa|
    00000590 74 68 00 02 01 00 ff ff ff ff ff ff ff ff 02 1d |th..............|
    000005a0 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e 67 73 |PerFieldPostings|
    000005b0 46 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 08 4c 75 |Format.format.Lu|
    000005c0 63 65 6e 65 39 30 1d 50 65 72 46 69 65 6c 64 50 |cene90.PerFieldP|
    000005d0 6f 73 74 69 6e 67 73 46 6f 72 6d 61 74 2e 73 75 |ostingsFormat.su|
    000005e0 66 66 69 78 01 30 00 00 00 08 6d 6f 64 69 66 69 |ffix.0....modifi|
    000005f0 65 64 01 00 00 00 ff ff ff ff ff ff ff ff 00 01 |ed..............|
    00000600 01 08 00 00 08 63 6f 6e 74 65 6e 74 73 02 00 03 |.....contents...|
    00000610 00 ff ff ff ff ff ff ff ff 02 1d 50 65 72 46 69 |...........PerFi|
    00000620 65 6c 64 50 6f 73 74 69 6e 67 73 46 6f 72 6d 61 |eldPostingsForma|
    00000630 74 2e 66 6f 72 6d 61 74 08 4c 75 63 65 6e 65 39 |t.format.Lucene9|
    00000640 30 1d 50 65 72 46 69 65 6c 64 50 6f 73 74 69 6e |0.PerFieldPostin|
    00000650 67 73 46 6f 72 6d 61 74 2e 73 75 66 66 69 78 01 |gsFormat.suffix.|
    00000660 30 00 00 00 c0 28 93 e8 00 00 00 00 00 00 00 00 |0....(..........|
    00000670 1f ee 84 f9 00 00 00 00 3f d7 6c 17 1c 4c 75 63 |........?.l..Luc|
    00000680 65 6e 65 39 30 53 74 6f 72 65 64 46 69 65 6c 64 |ene90StoredField|
    00000690 73 46 61 73 74 44 61 74 61 00 00 00 01 6b f0 66 |sFastData....k.f|
    000006a0 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 00 1e |V..[...:2MK.....|
    000006b0 00 01 08 18 1d 21 21 1d 1c 18 0a 13 0b 15 12 10 |.....!!.........|
    000006c0 15 0f 15 12 10 15 12 a0 00 16 2f 68 6f 6d 65 2f |........../home/|
    000006d0 64 61 f0 04 69 2f 64 6f 63 73 2f 62 62 62 2e 74 |da..i/docs/bbb.t|
    000006e0 78 74 00 1b 2f 68 6f 01 05 00 e0 69 2f 64 6f 63 |xt../ho....i/doc|
    000006f0 73 2f 69 6e 64 65 78 2f 5f 73 30 2e 63 66 73 00 |s/index/_s0.cfs.|
    00000700 1f 0f 00 50 61 69 2f 64 6f f0 04 63 73 2f 69 6e |...Pai/do..cs/in|
    00000710 64 65 78 2f 73 65 67 6d 65 6e 74 73 5f 31 24 00 |dex/segments_1$.|
    00000720 1f 0a 00 90 69 2f 64 6f 63 73 2f 69 6e f0 04 64 |....i/docs/in..d|
    00000730 65 78 2f 77 72 69 74 65 2e 6c 6f 63 6b 00 1b 2f |ex/write.lock../|
    00000740 68 6f 01 05 00 e0 69 2f 64 6f 63 73 2f 69 6e 64 |ho....i/docs/ind|
    00000750 65 78 2f 5f 73 30 2e 63 66 65 00 1a 0f 00 50 61 |ex/_s0.cfe....Pa|
    00000760 69 2f 64 6f f0 04 63 73 2f 69 6e 64 65 78 2f 5f |i/do..cs/index/_|
    00000770 30 2e 73 69 00 16 2f 68 6f 01 05 00 e0 69 2f 64 |0.si../ho....i/d|
    00000780 6f 63 73 2f 61 61 61 2e 74 78 74 c0 28 93 e8 00 |ocs/aaa.txt.(...|
    00000790 00 00 00 00 00 00 00 52 80 f1 02 00 00 00 00 00 |.......R........|
    000007a0 3f d7 6c 17 13 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
    000007b0 72 6d 73 49 6e 64 65 78 00 00 00 00 6b f0 66 56 |rmsIndex....k.fV|
    000007c0 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 |..[...:2MK...Luc|
    000007d0 65 6e 65 39 30 5f 30 00 00 c0 28 93 e8 00 00 00 |ene90_0...(.....|
    000007e0 00 00 00 00 00 6e c7 b4 6e 00 00 00 00 00 00 00 |.....n..n.......|
    000007f0 3f d7 6c 17 11 4c 75 63 65 6e 65 39 30 4e 6f 72 |?.l..Lucene90Nor|
    00000800 6d 73 44 61 74 61 00 00 00 00 6b f0 66 56 c3 12 |msData....k.fV..|
    00000810 5b 07 08 12 3a 32 4d 4b 92 f8 00 08 44 0e 00 21 |[...:2MK....D..!| <------------- here 08 44 0e 00 21
00000820 29 04 c0 28 93 e8 00 00 00 00 00 00 00 00 43 ab |)..(..........C.| <------------ 04 is the norm value
    00000830 9e 6c 00 00 00 00 00 00 3f d7 6c 17 19 4c 75 63 |.l......?.l..Luc|
    00000840 65 6e 65 39 30 50 6f 73 74 69 6e 67 73 57 72 69 |ene90PostingsWri|
    00000850 74 65 72 44 6f 63 00 00 00 00 6b f0 66 56 c3 12 |terDoc....k.fV..|
    00000860 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 6e |[...:2MK...Lucen|
    00000870 65 39 30 5f 30 03 03 02 05 08 03 01 02 02 0b 03 |e90_0...........|
    00000880 07 02 02 07 01 03 02 02 07 02 02 07 03 07 03 07 |................|
    00000890 05 02 15 03 04 02 03 02 10 03 05 03 05 05 02 10 |................|
    000008a0 02 03 05 03 02 10 02 02 05 03 c0 28 93 e8 00 00 |...........(....|
    000008b0 00 00 00 00 00 00 8d fa 92 14 00 00 00 00 00 00 |................|
    000008c0 3f d7 6c 17 12 42 6c 6f 63 6b 54 72 65 65 54 65 |?.l..BlockTreeTe|
    000008d0 72 6d 73 44 69 63 74 00 00 00 00 6b f0 66 56 c3 |rmsDict....k.fV.|
    000008e0 12 5b 07 08 12 3a 32 4d 4b 92 f8 0a 4c 75 63 65 |.[...:2MK...Luce|
    000008f0 6e 65 39 30 5f 30 36 84 0e 30 30 75 62 75 6e 74 |ne90_06..00ubunt|
    00000900 75 30 2e 32 32 2e 30 34 2e 31 31 31 30 2e 30 2e |u0.22.04.1110.0.|
    00000910 30 31 36 35 36 36 30 31 39 31 38 38 33 36 31 37 |0165660191883617|
    00000920 2e 30 2e 33 31 65 31 69 31 6d 31 6d 31 6d 32 33 |.0.31e1i1m1m1m23|
    00000930 33 33 35 2e 31 35 2e 30 36 37 39 5f 30 5f 30 5f |335.15.0679_0_0_|
    00000940 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 5f 30 5f |lucene90field_0_|
    00000950 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 73 69 6e |lucene90fieldsin|
    00000960 64 65 78 5f 30 5f 6c 75 63 65 6e 65 39 30 66 69 |dex_0_lucene90fi|
    00000970 65 6c 64 73 69 6e 64 65 78 66 69 6c 65 5f 70 6f |eldsindexfile_po|
    00000980 69 6e 74 65 72 73 5f 31 5f 30 cb b9 5f 6c 75 63 |inters_1_0.._luc|
    00000990 65 6e 65 39 30 5f 30 5f 6c 75 63 65 6e 65 39 30 |ene90_0_lucene90|
    000009a0 66 69 65 6c 64 73 69 6e 64 65 78 61 61 61 61 2e |fieldsindexaaaa.|
    000009b0 74 78 74 61 61 6d 62 6f 79 64 6f 67 6f 6f 64 69 |txtaamboydogoodi|
    000009c0 69 73 6b 6e 6f 77 74 68 69 6e 67 77 68 61 74 79 |isknowthingwhaty|
    000009d0 6f 75 61 6d 61 6d 64 36 34 36 01 10 01 06 0d 06 |ouamamd646......|
    000009e0 08 02 01 01 02 06 01 01 01 02 10 16 25 04 0b 14 |............%...|
    000009f0 01 07 1f 02 05 1c 02 04 02 01 04 00 03 02 02 03 |................|
    00000a00 02 01 07 02 01 02 01 04 06 07 02 04 01 06 01 02 |................|
    00000a10 02 05 3a 7a 01 3d 11 06 00 02 04 05 03 01 01 01 |..:z.=..........|
    00000a20 01 0f 03 01 02 01 01 01 02 11 01 01 01 0f 01 11 |................|
    00000a30 01 0f 02 00 02 08 01 08 01 01 01 01 05 01 09 01 |................|
    00000a40 0b 05 00 01 08 01 05 01 03 15 01 03 01 38 b4 09 |.............8..|
    00000a50 62 62 62 62 2e 74 78 74 62 65 73 74 5f 73 70 65 |bbbb.txtbest_spe|
    00000a60 65 64 62 6b 64 62 6c 6f 63 6b 74 72 65 65 74 65 |edbkdblocktreete|
    00000a70 72 6d 73 64 69 63 74 62 6c 6f 63 6b 74 72 65 65 |rmsdictblocktree|
    00000a80 74 65 72 6d 73 69 6e 64 65 78 62 6c 6f 63 6b 74 |termsindexblockt|
    00000a90 72 65 65 74 65 72 6d 73 6d 65 74 61 62 6f 79 62 |reetermsmetaboyb|
    00000aa0 75 69 6c 64 63 63 66 65 63 66 73 63 6f 6e 74 65 |uildccfecfsconte|
    00000ab0 6e 74 73 63 73 64 64 61 69 64 6f 64 6f 63 64 6f |ntscsddaidodocdo|
    00000ac0 63 5f 64 6f 63 5f 69 64 73 5f 30 64 6f 63 73 65 |c_doc_ids_0docse|
    00000ad0 75 66 66 64 6d 66 64 74 66 64 78 66 6c 75 73 68 |uffdmfdtfdxflush|
    00000ae0 66 6e 6d 66 73 74 38 01 07 0a 03 12 13 12 03 05 |fnmfst8.........|
    00000af0 01 03 03 08 02 01 03 02 03 04 09 04 03 03 03 03 |................|
    00000b00 05 03 03 1b 04 00 02 01 0d 02 02 05 02 01 04 01 |................|
    00000b10 02 0a 04 00 05 02 0a 01 04 01 04 01 05 02 01 3d |...............=|
    00000b20 8e 01 77 04 01 02 11 02 0f 01 01 01 01 01 01 02 |..w.............|
    00000b30 15 02 03 01 0f 01 11 04 01 01 0f 01 01 02 00 02 |................|
    00000b40 06 01 03 00 0b 04 04 02 0b 01 01 01 01 01 01 0b |................|
    00000b50 00 01 06 03 06 04 03 05 01 03 01 0b 01 50 cc 20 |.............P. |
    00000b60 67 67 65 6e 65 72 69 63 67 6f 6f 64 68 68 6f 6d |ggenericgoodhhom|
    00000b70 65 69 69 64 73 5f 30 69 6e 64 65 78 69 73 6a 6a |eiids_0indexisjj|
    00000b80 61 76 61 2e 72 75 6e 74 69 6d 65 2e 76 65 72 73 |ava.runtime.vers|
    00000b90 69 6f 6e 6a 61 76 61 2e 76 65 6e 64 6f 72 6a 61 |ionjava.vendorja|
    00000ba0 76 61 2e 76 65 72 73 69 6f 6e 6a 61 76 61 2e 76 |va.versionjava.v|
    00000bb0 6d 2e 76 65 72 73 69 6f 6e 6b 64 64 6b 64 69 6b |m.versionkddkdik|
    00000bc0 64 6d 30 6b 6e 6f 77 6c 6c 69 6e 75 78 6c 75 63 |dm0knowllinuxluc|
    00000bd0 65 6e 65 2e 76 65 72 73 69 6f 6e 6c 75 63 65 6e |ene.versionlucen|
    00000be0 65 39 30 6c 75 63 65 6e 65 39 30 5f 30 6c 75 63 |e90lucene90_0luc|
    00000bf0 65 6e 65 39 30 63 6f 6d 70 6f 75 6e 64 64 61 74 |ene90compounddat|
    00000c00 61 6c 75 63 65 6e 65 39 30 63 6f 6d 70 6f 75 6e |alucene90compoun|
    00000c10 64 65 6e 74 72 69 65 73 6c 75 63 65 6e 65 39 30 |dentrieslucene90|
    00000c20 66 69 65 6c 64 69 6e 66 6f 73 6c 75 63 65 6e 65 |fieldinfoslucene|
    00000c30 39 30 66 69 65 6c 64 73 69 6e 64 65 78 69 64 78 |90fieldsindexidx|
    00000c40 6c 75 63 65 6e 65 39 30 66 69 65 6c 64 73 69 6e |lucene90fieldsin|
    00000c50 64 65 78 6d 65 74 61 6c 75 63 65 6e 65 39 30 6e |dexmetalucene90n|
    00000c60 6f 72 6d 73 64 61 74 61 6c 75 63 65 6e 65 39 30 |ormsdatalucene90|
    00000c70 6e 6f 72 6d 73 6d 65 74 61 64 61 74 61 6c 75 63 |normsmetadataluc|
    00000c80 65 6e 65 39 30 70 6f 69 6e 74 73 66 6f 72 6d 61 |ene90pointsforma|
    00000c90 74 64 61 74 61 6c 75 63 65 6e 65 39 30 70 6f 69 |tdatalucene90poi|
    00000ca0 6e 74 73 66 6f 72 6d 61 74 69 6e 64 65 78 6c 75 |ntsformatindexlu|
    00000cb0 63 65 6e 65 39 30 70 6f 69 6e 74 73 66 6f 72 6d |cene90pointsform|
    00000cc0 61 74 6d 65 74 61 6c 75 63 65 6e 65 39 30 70 6f |atmetalucene90po|
    00000cd0 73 74 69 6e 67 73 77 72 69 74 65 72 64 6f 63 6c |stingswriterdocl|
    00000ce0 75 63 65 6e 65 39 30 70 6f 73 74 69 6e 67 73 77 |ucene90postingsw|
    00000cf0 72 69 74 65 72 70 6f 73 6c 75 63 65 6e 65 39 30 |riterposlucene90|
    00000d00 70 6f 73 74 69 6e 67 73 77 72 69 74 65 72 74 65 |postingswriterte|
    00000d10 72 6d 73 6c 75 63 65 6e 65 39 30 73 65 67 6d 65 |rmslucene90segme|
    00000d20 6e 74 69 6e 66 6f 6c 75 63 65 6e 65 39 30 73 74 |ntinfolucene90st|
    00000d30 6f 72 65 64 66 69 65 6c 64 73 66 61 73 74 64 61 |oredfieldsfastda|
    00000d40 74 61 6c 75 63 65 6e 65 39 30 73 74 6f 72 65 64 |talucene90stored|
    00000d50 66 69 65 6c 64 73 66 6f 72 6d 61 74 2e 6d 6f 64 |fieldsformat.mod|
    00000d60 65 6c 75 63 65 6e 65 39 33 50 01 07 04 01 04 01 |elucene93P......|
    00000d70 05 05 02 01 14 0b 0c 0f 03 03 04 04 01 05 0e 08 |................|
    00000d80 0a 14 17 12 16 17 11 15 18 19 18 19 19 1b 13 1c |................|
    00000d90 1f 08 16 05 04 00 02 09 06 00 01 02 0a 01 02 01 |................|
    00000da0 0f 08 15 03 02 01 02 05 21 56 a8 01 04 b9 01 05 |........!V......|
    00000db0 01 13 01 00 01 04 01 03 00 0a 06 01 04 01 01 03 |................|
    00000dc0 0b 05 01 11 02 01 01 01 01 01 01 03 01 01 01 01 |................|
    00000dd0 01 0f 01 00 01 0c 05 19 01 01 0f 01 01 03 01 06 |................|
    00000de0 0d 01 0b 01 01 02 01 01 01 01 01 01 01 02 01 02 |................|
    00000df0 01 01 01 01 01 01 01 02 11 02 0f 01 11 01 0b 01 |................|
    00000e00 5b a4 0f 6d 6f 64 69 66 69 65 64 6e 76 64 6e 76 |[..modifiednvdnv|
    00000e10 6d 6f 6f 63 73 6f 73 6f 73 2e 61 72 63 68 6f 73 |moocsosos.archos|
    00000e20 2e 76 65 72 73 69 6f 6e 70 70 61 69 70 61 74 68 |.versionppaipath|
    00000e30 70 65 72 66 69 65 6c 64 70 6f 73 74 69 6e 67 73 |perfieldpostings|
    00000e40 66 6f 72 6d 61 74 2e 66 6f 72 6d 61 74 70 65 72 |format.formatper|
    00000e50 66 69 65 6c 64 70 6f 73 74 69 6e 67 73 66 6f 72 |fieldpostingsfor|
    00000e60 6d 61 74 2e 73 75 66 66 69 78 70 6f 73 70 72 69 |mat.suffixpospri|
    00000e70 76 61 74 65 70 d7 99 70 d7 9b 70 d7 9c 71 71 78 |vatep..p..p..qqx|
    00000e80 72 73 65 67 6d 65 6e 74 73 73 69 73 69 6e 64 65 |rsegmentssisinde|
    00000e90 78 66 69 6c 65 5f 70 6f 69 6e 74 65 72 73 5f 31 |xfile_pointers_1|
    00000ea0 73 6f 75 72 63 65 74 68 69 6e 67 74 69 6d 74 69 |sourcethingtimti|
    00000eb0 6d 65 73 74 61 6d 70 74 69 70 78 74 6d 64 74 6d |mestamptipxtmdtm|
    00000ec0 70 75 75 62 75 6e 74 75 76 78 77 63 77 68 61 74 |puubuntuvxwcwhat|
    00000ed0 77 72 69 74 65 2e 6c 6f 63 6b 77 72 69 74 65 2e |write.lockwrite.|
    00000ee0 6c 6f 63 6b 38 78 79 79 6f 75 79 6f 75 37 7a 7a |lock8xyyouyou7zz|
    00000ef0 74 37 cb b9 cd b1 69 5a 08 03 03 01 03 02 07 0a |t7....iZ........|
    00000f00 01 03 04 1d 1d 03 07 03 03 03 01 03 08 02 15 06 |................|
    00000f10 05 03 09 04 03 03 01 06 02 02 04 0a 0b 01 01 03 |................|
    00000f20 04 01 03 02 03 21 07 02 02 05 02 01 03 02 01 02 |.....!..........|
    00000f30 01 03 08 0f 03 04 00 13 02 03 02 01 02 01 05 02 |................|
    00000f40 01 0b 08 11 08 10 01 60 be 01 01 9e 02 0d 02 01 |.......`........|
    00000f50 01 01 01 0b 01 11 03 01 01 01 01 0f 01 01 03 01 |................|
    00000f60 01 01 02 01 03 0d 03 05 01 00 01 0a 02 13 01 01 |................|
    00000f70 00 01 04 05 02 0b 01 0d 01 0f 01 11 01 13 01 11 |................|
    00000f80 01 05 01 03 01 01 01 0b 01 01 04 11 03 0f 02 01 |................|
    00000f90 02 03 02 05 01 01 02 01 02 0d 01 0f 01 05 01 01 |................|
    00000fa0 02 00 01 0c 15 0c 01 14 0f d4 0b 2f 68 6f 6d 65 |.........../home|
    00000fb0 2f 64 61 69 2f 64 6f 63 73 2f 61 61 61 2e 74 78 |/dai/docs/aaa.tx|
    00000fc0 74 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 2f |t/home/dai/docs/|
    00000fd0 62 62 62 2e 74 78 74 2f 68 6f 6d 65 2f 64 61 69 |bbb.txt/home/dai|
    00000fe0 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 5f 30 2e 63 |/docs/index/_0.c|
    00000ff0 66 65 2f 68 6f 6d 65 2f 64 61 69 2f 64 6f 63 73 |fe/home/dai/docs|
    00001000 2f 69 6e 64 65 78 2f 5f 30 2e 63 66 73 2f 68 6f |/index/_0.cfs/ho|
    00001010 6d 65 2f 64 61 69 2f 64 6f 63 73 2f 69 6e 64 65 |me/dai/docs/inde|
    00001020 78 2f 5f 30 2e 73 69 2f 68 6f 6d 65 2f 64 61 69 |x/_0.si/home/dai|
    00001030 2f 64 6f 63 73 2f 69 6e 64 65 78 2f 73 65 67 6d |/docs/index/segm|
    00001040 65 6e 74 73 5f 31 2f 68 6f 6d 65 2f 64 61 69 2f |ents_1/home/dai/|
    00001050 64 6f 63 73 2f 69 6e 64 65 78 2f 77 72 69 74 65 |docs/index/write|
    00001060 2e 6c 6f 63 6b 0e 16 16 1b 1b 1a 1f 1f 01 0d 09 |.lock...........|
    00001070 e4 01 06 17 11 0b 11 0b 05 c0 28 93 e8 00 00 00 |..........(.....|
    00001080 00 00 00 00 00 26 7d 6b cb 00 00 00 00 00 00 00 |.....&}k........|
    00001090 3f d7 6c 17 16 4c 75 63 65 6e 65 39 30 46 69 65 |?.l..Lucene90Fie|
    000010a0 6c 64 73 49 6e 64 65 78 49 64 78 00 00 00 00 6b |ldsIndexIdx....k|
    000010b0 f0 66 56 c3 12 5b 07 08 12 3a 32 4d 4b 92 f8 00 |.fV..[...:2MK...|
    000010c0 c0 28 93 e8 00 00 00 00 00 00 00 00 be 7c 21 a1 |.(...........|!.|
    000010d0 c0 28 93 e8 00 00 00 00 00 00 00 00 15 f4 63 e8 |.(............c.|
    000010e0

Reading the same address with gdb:
    (gdb) x/32xb 140063879776283
    0x7f6329ccc81b: 0x08 0x44 0x0e 0x00 0x21 0x29 0x04 0xc0
    0x7f6329ccc823: 0x28 0x93 0xe8 0x00 0x00 0x00 0x00 0x00
    0x7f6329ccc82b: 0x00 0x00 0x00 0x43 0xab 0x9e 0x6c 0x00
    0x7f6329ccc833: 0x00 0x00 0x00 0x00 0x00 0x3f 0xd7 0x6c


Source code analysis

DirectByteBufferR class hierarchy

(inheritance diagram: DirectByteBufferR extends DirectByteBuffer)

The nio DirectByteBufferR class is generated automatically for each target platform by the following build script:
// Source location: jdk/make/modules/java.base/gensrc/GensrcBuffer.gmk
    # Direct byte buffer
    #
    DIRECT_X_BUF := Direct-X-Buffer

    $(eval $(call SetupGenBuffer,DirectByteBuffer, $(DIRECT_X_BUF), type:=byte, BIN:=1))
    $(eval $(call SetupGenBuffer,DirectByteBufferR,$(DIRECT_X_BUF), type:=byte, BIN:=1, RW:=R))
DirectByteBufferR extends DirectByteBuffer, which in turn (via MappedByteBuffer) extends ByteBuffer.
Below is the DirectByteBufferR class produced by those build macros. It only exists after you build the JDK yourself (a downloaded JDK does not ship this source file); in my build it was generated at:
jdk/build/linux-x86_64-server-slowdebug/support/gensrc/java.base/java/nio/DirectByteBufferR.java



    // -- This file was mechanically generated: Do not edit! -- //

    package java.nio;

    import java.io.FileDescriptor;
    import java.lang.ref.Reference;
    import java.util.Objects;
    import jdk.internal.access.foreign.MemorySegmentProxy;
    import jdk.internal.misc.ScopedMemoryAccess.Scope;
    import jdk.internal.misc.VM;
    import jdk.internal.ref.Cleaner;
    import sun.nio.ch.DirectBuffer;


    class DirectByteBufferR extends DirectByteBuffer implements DirectBuffer
    {
    ...
    // Primary constructor
    //
    DirectByteBufferR(int cap) { // package-private
    super(cap);
    this.isReadOnly = true;

    }



    // For memory-mapped buffers -- invoked by FileChannelImpl via reflection
    //
    protected DirectByteBufferR(int cap, long addr,
    FileDescriptor fd,
    Runnable unmapper,
    boolean isSync, MemorySegmentProxy segment)
    {
    super(cap, addr, fd, unmapper, isSync, segment);
    this.isReadOnly = true;
    }

    ...

    }

Its read method, DirectByteBufferR.get, is inherited from DirectByteBuffer. The implementation is below:

It ultimately calls SCOPED_MEMORY_ACCESS.getByte.
    // jdk/build/linux-x86_64-server-slowdebug/support/gensrc/java.base/java/nio/DirectByteBuffer.java
    public byte get() {
    try {
    return ((SCOPED_MEMORY_ACCESS.getByte(scope(), null, ix(nextGetIndex()))));
    } finally {
    Reference.reachabilityFence(this);
    }
    }

    public byte get(int i) {
    try {
    return ((SCOPED_MEMORY_ACCESS.getByte(scope(), null, ix(checkIndex(i)))));
    } finally {
    Reference.reachabilityFence(this);
    }
    }

SCOPED_MEMORY_ACCESS is defined in MappedByteBuffer, and DirectByteBuffer is a subclass of MappedByteBuffer:
    class DirectByteBuffer  extends MappedByteBuffer implements DirectBuffer
    {
    ...
    }
SCOPED_MEMORY_ACCESS.getByte eventually calls UNSAFE.getByte:

    public class ScopedMemoryAccess {

    private static final Unsafe UNSAFE = Unsafe.getUnsafe();


    @ForceInline
    public byte getByte(Scope scope, Object base, long offset) {
    ...
return getByteInternal(scope, base, offset); // delegates to the internal method
    ...
    }

    @ForceInline @Scoped
    private byte getByteInternal(Scope scope, Object base, long offset) {
    ...
return UNSAFE.getByte(base, offset); // finally calls UNSAFE.getByte
    ...
    }
UNSAFE is a global static field; the native implementation that ends up being called is:
    // jdk/src/hotspot/share/prims/unsafe.cpp

    UNSAFE_ENTRY(java_type, Unsafe_Get##Type(JNIEnv *env, jobject unsafe, jobject obj, jlong offset)) { \
    return MemoryAccess<java_type>(thread, obj, offset).get(); \
    } UNSAFE_END \

After macro expansion this calls MemoryAccess::get, which simply reads the value at that memory address.

Further reading


Background

A look at Java's primitive types: their sizes and value ranges.

Platform: amd64

Source code analysis

In the C++ standard:

Type          | Signedness
char          | implementation defined (char is a distinct type that has an implementation-defined choice of "signed char" or "unsigned char" as its underlying type)
signed char   | signed
short int     | signed
int           | signed
long int      | signed
long long int | signed

On 64-bit Linux:

Java primitive | C/C++ type
jint           | int
jlong          | long
jbyte          | signed char
jboolean       | unsigned char
jchar          | unsigned short
jfloat         | float
jdouble        | double
jsize          | jint (i.e. int)
    // jdk/src/java.base/unix/native/include/jni_md.h
    typedef int jint;
    #ifdef _LP64
    typedef long jlong;
    #else
    typedef long long jlong;
    #endif

    typedef signed char jbyte;
    // jdk/src/java.base/share/native/include/jni.h
    #ifndef JNI_TYPES_ALREADY_DEFINED_IN_JNI_MD_H

    typedef unsigned char jboolean;
    typedef unsigned short jchar;
    typedef short jshort;
    typedef float jfloat;
    typedef double jdouble;

    typedef jint jsize;
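To check the Java-side sizes and ranges directly (these values are fixed by the language specification, independent of the C typedefs above), a small program like this can be used:

// Prints the bit width and value range of the Java primitive types.
public class PrimitiveSizes {
    public static void main(String[] args) {
        System.out.println("byte:  " + Byte.SIZE + " bits, " + Byte.MIN_VALUE + ".." + Byte.MAX_VALUE);
        System.out.println("char:  " + Character.SIZE + " bits, 0.." + (int) Character.MAX_VALUE);
        System.out.println("short: " + Short.SIZE + " bits, " + Short.MIN_VALUE + ".." + Short.MAX_VALUE);
        System.out.println("int:   " + Integer.SIZE + " bits, " + Integer.MIN_VALUE + ".." + Integer.MAX_VALUE);
        System.out.println("long:  " + Long.SIZE + " bits, " + Long.MIN_VALUE + ".." + Long.MAX_VALUE);
    }
}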
Further reading

Background

Java's Unsafe class exposes many low-level APIs; Netty, for example, uses this API heavily.

Example

Below is a call to the allocateMemory method of the Unsafe class.
import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class UnsafeDemo {

    public static void main(String[] args) {
        var unsafe = getUnsafe();
        var memory = unsafe.allocateMemory(100);
        System.out.println(memory);
    }

    private static Unsafe getUnsafe() {
        try {
            // Unsafe cannot be obtained directly; read the theUnsafe singleton field.
            Field field = Unsafe.class.getDeclaredField("theUnsafe");
            field.setAccessible(true);
            return (Unsafe) field.get(null);
        } catch (Exception e) {
            return null;
        }
    }
}
The memory value returned here is an address.
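As a follow-up usage sketch (not from the original post), the returned address can be written to and read back through the same Unsafe instance; since this memory is outside the GC's control, it also has to be freed explicitly:

import java.lang.reflect.Field;
import sun.misc.Unsafe;

public class UnsafeReadWriteDemo {
    public static void main(String[] args) throws Exception {
        Field f = Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        Unsafe unsafe = (Unsafe) f.get(null);

        long addr = unsafe.allocateMemory(100);   // raw native address
        unsafe.putByte(addr, (byte) 42);          // write a byte at that address
        System.out.println(unsafe.getByte(addr)); // read it back: 42
        unsafe.freeMemory(addr);                  // off-heap memory is not GC-managed
    }
}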

Under the hood, allocateMemory calls Unsafe_AllocateMemory0:
    UNSAFE_ENTRY(jlong, Unsafe_AllocateMemory0(JNIEnv *env, jobject unsafe, jlong size)) {
    size_t sz = (size_t)size;

    assert(is_aligned(sz, HeapWordSize), "sz not aligned");

    void* x = os::malloc(sz, mtOther);

    return addr_to_java(x);
    } UNSAFE_END
Which ultimately calls glibc's malloc:
    void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

    // Special handling for NMT preinit phase before arguments are parsed
    void* rc = NULL;
    if (NMTPreInit::handle_malloc(&rc, size)) {
    // No need to fill with 0 because DumpSharedSpaces doesn't use these
    // early allocations.
    return rc;
    }

    DEBUG_ONLY(check_crash_protection());

    // On malloc(0), implementations of malloc(3) have the choice to return either
    // NULL or a unique non-NULL pointer. To unify libc behavior across our platforms
    // we chose the latter.
    size = MAX2((size_t)1, size);

    // For the test flag -XX:MallocMaxTestWords
    if (has_reached_max_malloc_test_peak(size)) {
    return NULL;
    }

    const size_t outer_size = size + MemTracker::overhead_per_malloc();

    // Check for overflow.
    if (outer_size < size) {
    return NULL;
    }

ALLOW_C_FUNCTION(::malloc, void* const outer_ptr = ::malloc(outer_size);) <-- malloc allocates the memory here
    if (outer_ptr == NULL) {
    return NULL;
    }

    void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack);

    if (DumpSharedSpaces) {
    // Need to deterministically fill all the alignment gaps in C++ structures.
    ::memset(inner_ptr, 0, size);
    } else {
    DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
    }
    DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
    return inner_ptr;
    }
Further reading

Background

ChatGPT has been very popular lately, so I wanted to understand how TensorFlow fits data.

Python version
    $python3 -V
    Python 3.10.6
Install

Installation can be done either via pip or via Jupyter.

I used the pip approach.
• Download and install Miniconda

  curl https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -o Miniconda3-latest-Linux-x86_64.sh
  bash Miniconda3-latest-Linux-x86_64.sh

• Create an environment named tf

  ## My python is 3.10; adjust to your own setup
  conda create --name tf python=3.10

• Install TensorFlow

  (tf) dai@myhost:~$ pip install tensorflow==2.11.*

• Test the installation

  (tf) dai@myhost:~$ python3 -c "import tensorflow as tf; print(tf.reduce_sum(tf.random.normal([1000, 1000])))"

Further reading


Background

I want to understand the assembly implementation of JVM bytecodes; the target platform is x86.

Assembly syntax

Take a single operation, assigning the constant 1 to the eax register.

Assembly has two notation styles:

Description                               | Intel            | AT&T
Write 1 into the eax register             | mov eax,1        | movl $1,%eax
Write the value at address ebx+3 into eax | mov eax,[ebx+3]  | movl 3(%ebx),%eax

Stack frame

In 64-bit x86 mode, rbcp uses r13 and points to the next bytecode instruction (i = instruction).
r14 holds the pointer to the local variables.
    // Global Register Names
    static const Register rbcp = LP64_ONLY(r13) NOT_LP64(rsi);
    static const Register rlocals = LP64_ONLY(r14) NOT_LP64(rdi);
Here LP64_ONLY() and NOT_LP64() are selected by the _LP64 macro:
    __LP64__
    _LP64
    These macros are defined, with value 1, if (and only if) the compilation is for a target where long int and pointer both use 64-bits and int uses 32-bit.
Registers under amd64 (as used for the Java stack frame):

Register | Meaning
r14      | base address of the local variables
r13      | points to the next bytecode to execute

Similar to a C call stack, the Java stack looks like this:

(stack layout diagram)

A frame is described by the following structure; its main fields are:
• _sp: points to the stack
• _pc: points to the next instruction
      jdk/src/hotspot/share/runtime/frame.hpp
      class frame {
      private:
      // Instance variables:
intptr_t* _sp; // stack pointer (from Thread::last_Java_sp), the Java stack pointer
address _pc; // program counter (the next instruction after the call)

      CodeBlob* _cb; // CodeBlob that "owns" pc
      enum deopt_state {
      not_deoptimized,
      is_deoptimized,
      unknown
      };

      deopt_state _deopt_state;

      ...

      };

    bytecode

    enum TosState {         // describes the tos cache contents
    btos = 0, // byte, bool tos cached
    ztos = 1, // byte, bool tos cached
    ctos = 2, // char tos cached
    stos = 3, // short tos cached
    itos = 4, // int tos cached
    ltos = 5, // long tos cached
    ftos = 6, // float tos cached
    dtos = 7, // double tos cached
    atos = 8, // object cached
    vtos = 9, // tos not cached
    number_of_states,
    ilgl // illegal state: should not occur
    };
iload

bytecode | enum
iload    | 21 (0x15)
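To connect this bytecode back to Java source: compiling a method like the one below and inspecting it with javap -c shows iload/istore-style instructions (the exact forms, for example the slot-specific iload_1, depend on which local variable slots the compiler assigns):

// A tiny method to relate the discussed bytecodes to Java source.
public class IloadDemo {
    static int addOne(int a) {
        int b = a + 1;   // roughly: iload a, iconst_1, iadd, istore b
        return b;        // roughly: iload b, ireturn
    }

    public static void main(String[] args) {
        System.out.println(addOne(41)); // 42
    }
}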
    $65 = (address) 0x7fffe1012693 "A\017\266]\002\203\373\025\017\204J"
    (gdb) x/20i 0x7fffe1012693
    0x7fffe1012693: movzbl 0x2(%r13),%ebx
0x7fffe1012698: cmp $0x15,%ebx <--- the next bytecode
0x7fffe101269b: je 0x7fffe10126eb <-- jump to done
0x7fffe10126a1: cmp $0xe0,%ebx <-- check whether the next bytecode is _fast_iload
0x7fffe10126a7: mov $0xe1,%ecx <------ if the next one is _fast_iload, rewrite to _fast_iload2
0x7fffe10126ac: je 0x7fffe10126bd <-------- jump to the rewrite label
    0x7fffe10126ae: cmp $0x34,%ebx
    0x7fffe10126b1: mov $0xe2,%ecx
    0x7fffe10126b6: je 0x7fffe10126bd
    0x7fffe10126b8: mov $0xe0,%ecx
    0x7fffe10126bd: movzbl 0x0(%r13),%ebx
    0x7fffe10126c2: cmp $0x15,%ebx
    0x7fffe10126c5: je 0x7fffe10126e7
    0x7fffe10126cb: cmp %ecx,%ebx
    0x7fffe10126cd: je 0x7fffe10126e7
    0x7fffe10126d3: movabs $0x7ffff74ef9d7,%rdi
    0x7fffe10126dd: and $0xfffffffffffffff0,%rsp
    0x7fffe10126e1: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe10126e6: hlt
    0x7fffe10126e7: mov %cl,0x0(%r13)

Source code analysis
    void TemplateTable::iload_internal(RewriteControl rc) {
    transition(vtos, itos);
    if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    Register bc = r4;

    // get next bytecode
    __ load_unsigned_byte(r1, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload or a caload, and therefore
    // an iload pair.
__ cmpw(r1, Bytecodes::_iload); // <--- the next bytecode
__ br(Assembler::EQ, done); // <---- jump to done

// if _fast_iload rewrite to _fast_iload2
__ cmpw(r1, Bytecodes::_fast_iload); // <-- check whether the next bytecode is _fast_iload
__ movw(bc, Bytecodes::_fast_iload2); // <------ if so, rewrite to _fast_iload2
__ br(Assembler::EQ, rewrite); // <-------- jump to the rewrite label

    // if _caload rewrite to _fast_icaload
    __ cmpw(r1, Bytecodes::_caload);
    __ movw(bc, Bytecodes::_fast_icaload);
    __ br(Assembler::EQ, rewrite);

    // else rewrite to _fast_iload
    __ movw(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: new bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, r1, false);
    __ bind(done);

    }

    // do iload, get the local value into tos
    locals_index(r1);
    __ ldr(r0, iaddress(r1));

    }

    aconst_null

bytecode    | description                          | enum
aconst_null | push a null reference onto the stack | 0x01
    void TemplateTable::aconst_null() {
    transition(vtos, atos);
__ xorl(rax, rax); // rax is the top-of-stack register
    }
istore

bytecode | description                   | enum
istore   | store int into local variable | 54 (0x36)

This bytecode shows how local variables are accessed.
    void TemplateTable::istore() {
transition(itos, vtos); // only an assertion: the state before is itos and after is vtos; the real transition is set up by def()
locals_index(rbx); // write the local variable index (the offset) into rbx
__ movl(iaddress(rbx), rax); // iaddress is rlocals + rbx, i.e. the target address; store rax there
    }

Here iaddress(rbx) is really the offset rlocals + rbx, i.e. an offset relative to the local variable base.
    static inline Address iaddress(Register r) {
    return Address(rlocals, r, Address::times_ptr);
    }
iaddress is defined in src\hotspot\cpu\x86\assembler_x86.hpp.
The call chain is iaddress -> Address.

    static inline Address iaddress(Register r) {
    return Address(rlocals, r, Address::times_ptr);
    }
    Address(Register base, Register index, ScaleFactor scale, int disp = 0)
    : _base (base),
    _index(index),
    _xmmindex(xnoreg),
    _scale(scale),
    _disp (disp),
    _isxmmindex(false) {
    assert(!index->is_valid() == (scale == Address::no_scale),
    "inconsistent address");
    }
Expanding def for istore

We saw transition(itos, vtos) above; that transition is only an assertion (like a test-time check). The real work is done by def():
    def(Bytecodes::_istore              , ubcp|____|clvm|____, itos, vtos, istore              ,  _           );

Let's look at how def expands; it gradually unfolds into:
    void TemplateTable::def(Bytecodes::Code code, int flags, TosState in, TosState out, void (*gen)(int arg), int arg) {
    ...
    Template* t = is_wide ? template_for_wide(code) : template_for(code);
    // setup entry
    t->initialize(flags, in, out, gen, arg);
    assert(t->bytecode() == code, "just checkin'");
    }
The in and out states passed here are used later by TemplateInterpreterGenerator::generate_and_dispatch:

    //------------------------------------------------------------------------------------------------------------------------

    void TemplateInterpreterGenerator::generate_and_dispatch(Template* t, TosState tos_out) {
    #ifndef PRODUCT
    // debugging code
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt > 0) count_bytecode();
    if (PrintBytecodeHistogram) histogram_bytecode(t);
    if (PrintBytecodePairHistogram) histogram_bytecode_pair(t);
    if (TraceBytecodes) trace_bytecode(t);
    if (StopInterpreterAt > 0) stop_interpreter_at();
    __ verify_FPU(1, t->tos_in());
    #endif // !PRODUCT
    int step = 0;
    if (!t->does_dispatch()) {
    step = t->is_wide() ? Bytecodes::wide_length_for(t->bytecode()) : Bytecodes::length_for(t->bytecode());
    if (tos_out == ilgl) tos_out = t->tos_out();
    // compute bytecode size
    assert(step > 0, "just checkin'");
    // setup stuff for dispatching next bytecode
    if (ProfileInterpreter && VerifyDataPointer
    && MethodData::bytecode_has_profile(t->bytecode())) {
    __ verify_method_data_pointer();
    }
    __ dispatch_prolog(tos_out, step);
    }
    // generate template
    t->generate(_masm);
    // advance
    if (t->does_dispatch()) {
    #ifdef ASSERT
    // make sure execution doesn't go beyond this point if code is broken
    __ should_not_reach_here();
    #endif // ASSERT
    } else {
    // dispatch to next bytecode
    __ dispatch_epilog(tos_out, step);
    }
    }
Reference

Background

Java strings involve many encoding-related issues; this post collects them in one place.

Key concepts

Code unit

A code unit is the smallest unit of an encoding (note that a character from a Unicode plane may be made up of several code units).

Encoding | Code unit size
UTF-8    | 1 byte
UTF-16   | 2 bytes

Java's char

A Java char is 2 bytes; its value range is 0 to 2^16 - 1.
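A small illustration of the code unit vs. code point distinction: a character outside the Basic Multilingual Plane occupies two UTF-16 code units, i.e. two Java chars:

public class CodeUnitDemo {
    public static void main(String[] args) {
        String s = "\uD83D\uDE00"; // U+1F600, a single emoji code point
        System.out.println(s.length());                      // 2 (char code units)
        System.out.println(s.codePointCount(0, s.length())); // 1 (code point)
        System.out.println((int) Character.MAX_VALUE);       // 65535 = 2^16 - 1
    }
}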


Background

When doing batch inserts into ClickHouse over JDBC, CPU usage rose dramatically, to over 90%.

Investigation and root cause

Environment

JDK: jdk11
ClickHouse JDBC driver:
    <groupId>com.clickhouse</groupId>
    <artifactId>clickhouse-jdbc</artifactId>
    <version>0.3.2-patch11</version>
Root cause:
The SQL was assembled with MyBatis Plus, batch-inserting 2000 rows at a time, with the statement built by a MyBatis XML foreach, roughly:

insert into table values (row1_field1, row1_field2), (row2_field1, row2_field2) .... the MyBatis XML foreach repeats this 2000 times
Production warning log:
    Please consider to use one and only one values expression, for example: use 'values(?)' instead of 'values(?),(?)'.
Because the JDBC driver's parser is slow, the prepared statement needs to be changed to the following form:

insert into table values ( ?, ? )  ## only one values group
The approach used:
    // Note: "insert into table values(?,?)" is treated as "insert into mytable"
    try (PreparedStatement ps = conn.prepareStatement("insert into table values(?,?)")) {
    ps.setString(1, "test"); // id
    ps.setObject(2, LocalDateTime.now()); // timestamp
    ps.addBatch(); // append parameters to the query
    ...
    ps.executeBatch(); // issue the composed query: insert into mytable values(...)(...)...(...)
    }
Result

CPU dropped from around 80% to under 30%.

Before optimization:
(CPU chart before optimization)

After optimization:
(CPU chart after optimization)

Further reading

diff --git a/page/7/index.html b/page/7/index.html
new file mode 100644
index 0000000000..877516494b
--- /dev/null
+++ b/page/7/index.html
@@ -0,0 +1,1238 @@
Command                           | Description
java -XshowSettings:all -version  | show all settings

Background

Double-checked locking comes up when we construct a singleton.

Usage

For a singleton, we need to share this instance across different threads, so the field is typically declared volatile and checked both before and after acquiring the lock.
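As an illustration of the pattern, a typical double-checked locking singleton looks roughly like this (a sketch, not code from the post); the volatile modifier is what makes the unsynchronized first check safe:

public class Singleton {
    private static volatile Singleton instance;

    private Singleton() {}

    public static Singleton getInstance() {
        if (instance == null) {                 // first check, no locking
            synchronized (Singleton.class) {
                if (instance == null) {         // second check, under the lock
                    instance = new Singleton();
                }
            }
        }
        return instance;
    }
}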

Further reading


    Garbage-First Garbage Collection

Garbage-First, G1 for short, is one of the garbage collectors currently available in Java.

Goal: maintain overall throughput while meeting soft real-time pause constraints.
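To relate this to a running JVM: G1 is selected with -XX:+UseG1GC (and is the default in recent JDKs), and -XX:MaxGCPauseMillis sets the soft pause-time goal. A small check of which collectors are active, not part of the original post:

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class GcCheck {
    public static void main(String[] args) {
        // With G1 enabled this typically prints
        // "G1 Young Generation" and "G1 Old Generation".
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            System.out.println(gc.getName());
        }
    }
}

Run it with, for example: java -XX:+UseG1GC -XX:MaxGCPauseMillis=200 GcCheck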

Structure of the algorithm:

• Equal-sized heap regions: the whole heap is partitioned into regions of the same size.

The Garbage-First collector achieves these goals via several techniques. The heap is partitioned into a set of equal-sized heap regions, much like the train cars of the Mature-Object Space collector of Hudson and Moss [22]. However, whereas the remembered sets of the Mature-Object Space collector are unidirectional, recording pointers from older regions to younger but not vice versa, Garbage-First remembered sets record pointers from all regions (with some exceptions, described in sections 2.4 and 4.6). Recording all references allows an arbitrary set of heap regions to be chosen for collection. A concurrent thread processes log records created by special mutator write barriers to keep remembered sets up-to-date, allowing shorter collections.

    +

Source code analysis

- Young GC
    void G1YoungCollector::collect() {
    // Do timing/tracing/statistics/pre- and post-logging/verification work not
    // directly related to the collection. They should not be accounted for in
    // collection work timing.

    // The G1YoungGCTraceTime message depends on collector state, so must come after
    // determining collector state.
    G1YoungGCTraceTime tm(this, _gc_cause);

    // JFR
    G1YoungGCJFRTracerMark jtm(gc_timer_stw(), gc_tracer_stw(), _gc_cause);
    // JStat/MXBeans
    G1MonitoringScope ms(monitoring_support(),
    false /* full_gc */,
    collector_state()->in_mixed_phase() /* all_memory_pools_affected */);
    // Create the heap printer before internal pause timing to have
    // heap information printed as last part of detailed GC log.
    G1HeapPrinterMark hpm(_g1h);
    // Young GC internal pause timing
    G1YoungGCNotifyPauseMark npm(this);

    // Verification may use the workers, so they must be set up before.
    // Individual parallel phases may override this.
    set_young_collection_default_active_worker_threads();

    // Wait for root region scan here to make sure that it is done before any
    // use of the STW workers to maximize cpu use (i.e. all cores are available
    // just to do that).
    wait_for_root_region_scanning();

    G1YoungGCVerifierMark vm(this);
    {
    // Actual collection work starts and is executed (only) in this scope.

    // Young GC internal collection timing. The elapsed time recorded in the
    // policy for the collection deliberately elides verification (and some
    // other trivial setup above).
    policy()->record_young_collection_start();

    calculate_collection_set(jtm.evacuation_info(), _target_pause_time_ms);

    G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
    G1PreservedMarksSet preserved_marks_set(workers()->active_workers());
    G1ParScanThreadStateSet per_thread_states(_g1h,
    &rdcqs,
    &preserved_marks_set,
    workers()->active_workers(),
    collection_set()->young_region_length(),
    collection_set()->optional_region_length(),
    &_evac_failure_regions);

    pre_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

    bool may_do_optional_evacuation = collection_set()->optional_region_length() != 0;
    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states, may_do_optional_evacuation);

    if (may_do_optional_evacuation) {
    evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(jtm.evacuation_info(), &per_thread_states);

    // Refine the type of a concurrent mark operation now that we did the
    // evacuation, eventually aborting it.
    _concurrent_operation_is_full_mark = policy()->concurrent_operation_is_full_mark("Revise IHOP");

    // Need to report the collection pause now since record_collection_pause_end()
    // modifies it to the next state.
    jtm.report_pause_type(collector_state()->young_gc_pause_type(_concurrent_operation_is_full_mark));

    policy()->record_young_collection_end(_concurrent_operation_is_full_mark, evacuation_failed());
    }
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
    }
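As an aside, a throwaway allocation loop is enough to watch this collector in action; a sketch, assuming any recent JDK where G1 is the default (the heap size and logging flags are illustrative):

// Run with e.g.: java -Xmx512m -Xlog:gc* YoungGcDemo
public class YoungGcDemo {
    public static void main(String[] args) {
        byte[] keep = null;
        for (int i = 0; i < 1_000_000; i++) {
            // Most of these arrays die young and are reclaimed by young collections.
            byte[] garbage = new byte[16 * 1024];
            if (i % 10_000 == 0) {
                keep = garbage; // keep an occasional survivor so not everything is dead
            }
        }
        System.out.println(keep.length);
    }
}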
The code that actually does the copying:
// evacuate_live_objects(...) eventually dispatches to:

    MAYBE_INLINE_EVACUATION
    void G1ParScanThreadState::dispatch_task(ScannerTask task) {
    verify_task(task);
    if (task.is_narrow_oop_ptr()) {
    do_oop_evac(task.to_narrow_oop_ptr());
    } else if (task.is_oop_ptr()) { //oop 复制
    do_oop_evac(task.to_oop_ptr());
    } else {
    do_partial_array(task.to_partial_array_task());
    }
    }
Call stack:
    Thread 23 "GC Thread#4" hit Breakpoint 1, G1ParScanThreadState::do_copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...) at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:443
    443 assert(region_attr.is_in_cset(),
    (gdb) bt
    #0 G1ParScanThreadState::do_copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...) at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:443
    #1 0x00007ffff64ab3f6 in G1ParScanThreadState::copy_to_survivor_space (this=0x7fff7c000d90, region_attr=..., old=0x716809d28, old_mark=...)
    at /home/dai/jdk/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp:555
    #2 0x00007ffff64de15e in G1ParCopyClosure<(G1Barrier)0, false>::do_oop_work<oopDesc*> (this=0x7fff7c001478, p=0x7ffff02e1cc8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp:240
    #3 0x00007ffff64dcbc3 in G1ParCopyClosure<(G1Barrier)0, false>::do_oop (this=0x7fff7c001478, p=0x7ffff02e1cc8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1OopClosures.hpp:167
    #4 0x00007ffff6546dd8 in chunk_oops_do (f=0x7fff7c001478, chunk=0x7ffff02e1cb0, chunk_top=0x7ffff02e1cd0 "\350\034\200\026\a") at /home/dai/jdk/src/hotspot/share/runtime/handles.cpp:100
    #5 0x00007ffff6546e23 in HandleArea::oops_do (this=0x7ffff02e1c30, f=0x7fff7c001478) at /home/dai/jdk/src/hotspot/share/runtime/handles.cpp:108
    #6 0x00007ffff6d85dd4 in Thread::oops_do_no_frames (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:550
    #7 0x00007ffff6d8a513 in JavaThread::oops_do_no_frames (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:1968
    #8 0x00007ffff6d85e28 in Thread::oops_do (this=0x7ffff02e1160, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:580
    #9 0x00007ffff6d91359 in ParallelOopsDoThreadClosure::do_thread (this=0x7fff87dfaa00, t=0x7ffff02e1160) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:3620
    #10 0x00007ffff6d8c40b in Threads::possibly_parallel_threads_do (is_par=true, tc=0x7fff87dfaa00) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:2545
    #11 0x00007ffff6d8eac8 in Threads::possibly_parallel_oops_do (is_par=true, f=0x7fff7c001478, cf=0x7fff7c001520) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:3626
    #12 0x00007ffff64dea53 in G1RootProcessor::process_java_roots (this=0x7fffc9723df0, closures=0x7fff7c001470, phase_times=0x7fffb8001380, worker_id=1)
    at /home/dai/jdk/src/hotspot/share/gc/g1/g1RootProcessor.cpp:183
    #13 0x00007ffff64de78e in G1RootProcessor::evacuate_roots (this=0x7fffc9723df0, pss=0x7fff7c000d90, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1RootProcessor.cpp:60
    #14 0x00007ffff64f06b8 in G1EvacuateRegionsTask::scan_roots (this=0x7fffc9723f50, pss=0x7fff7c000d90, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1YoungCollector.cpp:706
    #15 0x00007ffff64f0632 in G1EvacuateRegionsBaseTask::work (this=0x7fffc9723f50, worker_id=1) at /home/dai/jdk/src/hotspot/share/gc/g1/g1YoungCollector.cpp:693
    #16 0x00007ffff6e8bb7c in WorkerTaskDispatcher::worker_run_task (this=0x7ffff00a4c88) at /home/dai/jdk/src/hotspot/share/gc/shared/workerThread.cpp:67
    #17 0x00007ffff6e8c074 in WorkerThread::run (this=0x7fffb800df30) at /home/dai/jdk/src/hotspot/share/gc/shared/workerThread.cpp:159
    #18 0x00007ffff6d8557f in Thread::call_run (this=0x7fffb800df30) at /home/dai/jdk/src/hotspot/share/runtime/thread.cpp:358
    #19 0x00007ffff6acc1e7 in thread_native_entry (thread=0x7fffb800df30) at /home/dai/jdk/src/hotspot/os/linux/os_linux.cpp:705
    #20 0x00007ffff7c94ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #21 0x00007ffff7d26a40 in clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:81

Memory copy:
    static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
    #ifdef AMD64
    switch (count) {
    case 8: to[7] = from[7];
    case 7: to[6] = from[6];
    case 6: to[5] = from[5];
    case 5: to[4] = from[4];
    case 4: to[3] = from[3];
    case 3: to[2] = from[2];
    case 2: to[1] = from[1];
    case 1: to[0] = from[0];
    case 0: break;
    default:
    (void)memcpy(to, from, count * HeapWordSize);
    break;
    }
    #else
    // Includes a zero-count check.
    intx temp;
    __asm__ volatile(" testl %6,%6 ;"
    " jz 3f ;"
    " cmpl $32,%6 ;"
    " ja 2f ;"
    " subl %4,%1 ;"
    "1: movl (%4),%3 ;"
    " movl %7,(%5,%4,1);"
    " addl $4,%0 ;"
    " subl $1,%2 ;"
    " jnz 1b ;"
    " jmp 3f ;"
    "2: rep; smovl ;"
    "3: nop "
    : "=S" (from), "=D" (to), "=c" (count), "=r" (temp)
    : "0" (from), "1" (to), "2" (count), "3" (temp)
    : "memory", "cc");
    #endif // AMD64
    }
Related reading


Background

Set up a Redis cluster on k8s:
    root@redis-app-0:/data# redis-cli 
    127.0.0.1:6379> info cluster
    # Cluster
    cluster_enabled:1
    127.0.0.1:6379> cluster meet 10.42.0.35 6379
    OK
    127.0.0.1:6379> cluster meet 10.42.0.36 6379
    OK
    127.0.0.1:6379> cluster meet 10.42.0.37 6379
    OK
    127.0.0.1:6379> cluster meet 10.42.0.38 6379
    OK
    127.0.0.1:6379> cluster meet 10.42.0.39 6379
    OK
Check the nodes:
127.0.0.1:6379> cluster nodes
    f8d5dd6aef17c622f541ade32a95430421606f6c 10.42.0.39:6379 master - 0 1673791234512 0 connected
    76af8c3c32cf535a3733ce75db2c3c6719c644fc 10.42.0.38:6379 master - 0 1673791234512 4 connected
    7f803ec0f21e4382bb773285fd40286069b26075 10.42.0.36:6379 master - 0 1673791235012 2 connected
    c48e86d680b74df9c70cb7369201fb2cbd8650be 10.42.0.34:6379 myself,master - 0 0 5 connected
    3ab965513f345444689cdeb7358c51263772f454 10.42.0.35:6379 master - 0 1673791233512 1 connected
    8e256e1614cf9f330404693b6f18785da5794fbc 10.42.0.37:6379 master - 0 1673791234011 3 connected
    127.0.0.1:6379>

Assign slots:
    10.42.0.39:6379> cluster nodes
    f8d5dd6aef17c622f541ade32a95430421606f6c 10.42.0.39:6379 myself,slave 76af8c3c32cf535a3733ce75db2c3c6719c644fc 0 0 0 connected
    7f803ec0f21e4382bb773285fd40286069b26075 10.42.0.36:6379 master - 0 1673791666954 2 connected
    8e256e1614cf9f330404693b6f18785da5794fbc 10.42.0.37:6379 slave 7f803ec0f21e4382bb773285fd40286069b26075 0 1673791668456 3 connected
    76af8c3c32cf535a3733ce75db2c3c6719c644fc 10.42.0.38:6379 master - 0 1673791667955 4 connected
    3ab965513f345444689cdeb7358c51263772f454 10.42.0.35:6379 slave c48e86d680b74df9c70cb7369201fb2cbd8650be 0 1673791668955 5 connected
    c48e86d680b74df9c70cb7369201fb2cbd8650be 10.42.0.34:6379 master - 0 1673791666954 5 connected
    10.42.0.39:6379> redis-cli -h 10.42.0.34 -p 6379 cluster addslots {0..5461}
    (error) ERR unknown command 'redis-cli'
    10.42.0.39:6379> exit
    root@redis-app-0:/data# redis-cli -h 10.42.0.34 -p 6379 cluster addslots {0..5461}
    OK
    root@redis-app-0:/data# redis-cli -h 10.42.0.36 -p 6379 cluster addslots {5462..10922}
    OK
    root@redis-app-0:/data# redis-cli -h 10.42.0.38 -p 6379 cluster addslots {10923..16383}

Result:
    127.0.0.1:6379> cluster info
    cluster_state:ok
    cluster_slots_assigned:16384
    cluster_slots_ok:16384
    cluster_slots_pfail:0
    cluster_slots_fail:0
    cluster_known_nodes:6
    cluster_size:3
    cluster_current_epoch:5
    cluster_my_epoch:5
    cluster_stats_messages_sent:3134
    cluster_stats_messages_received:3134

Related reading


Background

In production, requests to ClickHouse were returning 400 Bad Request.

Investigation

The production ClickHouse is exposed over its HTTP port.

The Java client in production throws a 400 exception:
    	ru.yandex.clickhouse.except.ClickHouseException: ClickHouse exception, code: 400, host: internal-LB-clickhouse-write-intranet-429192233.us-west-2.elb.amazonaws.com, port: 8123; HTTP/1.1 400 Bad Request
    at ru.yandex.clickhouse.except.ClickHouseExceptionSpecifier.specify(ClickHouseExceptionSpecifier.java:60)
    at ru.yandex.clickhouse.except.ClickHouseExceptionSpecifier.specify(ClickHouseExceptionSpecifier.java:26)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.checkForErrorAndThrow(ClickHouseStatementImpl.java:1080)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.getInputStream(ClickHouseStatementImpl.java:770)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.getLastInputStream(ClickHouseStatementImpl.java:693)
    Caused by: java.lang.IllegalStateException: HTTP/1.1 400 Bad Request
    at ru.yandex.clickhouse.ClickHouseStatementImpl.checkForErrorAndThrow(ClickHouseStatementImpl.java:1080)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.getInputStream(ClickHouseStatementImpl.java:770)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.getLastInputStream(ClickHouseStatementImpl.java:693)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.executeQuery(ClickHouseStatementImpl.java:341)
    at ru.yandex.clickhouse.ClickHouseStatementImpl.executeQuery(ClickHouseStatementImpl.java:326)
Checking the ClickHouse server log, we found the following error:
    2023.01.12 09:24:26.187479 [ 205027 ] {65179d49-ea6e-4a15-b13b-16d8378cfe29} <Error> executeQuery: Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 72 ('Client'): Client as DEFAULT_VALUE, Application name as DESCRIPTION union all select CustomHttpHeaders as NAME, toInt32(0) as MAX_LEN,  as DEFAULT_VALUE, Custom HTTP heade. Expected one of: UNION, LIMIT, WHERE, WINDOW, end of query, HAVING, GROUP BY, INTO OUTFILE, OFFSET, PREWHERE, Comma, ORDER BY, SETTINGS, FROM, FORMAT, WITH, token (version 21.3.3.14 (official build)) (from 172.31.42.57:23878) (in query: select ApplicationName as NAME, toInt32(0) as MAX_LEN, ClickHouse Java Client as DEFAULT_VALUE, Application name as DESCRIPTION union all select CustomHttpHeaders as NAME, toInt32(0) as MAX_LEN, as DEFAULT_VALUE, Custom HTTP headers as DESCRIPTION union all select CustomHttpParameters as NAME, toInt32(0) as MAX_LEN, as DEFAULT_VALUE, Customer HTTP query parameters as DESCRIPTION), Stack trace (when copying this message, always include the lines below):
Finally we found the related issues on GitHub:
https://github.com/dbeaver/dbeaver/issues/16885
https://github.com/ClickHouse/clickhouse-jdbc/pull/930

It turned out the SQL generated by the ClickHouse JDBC driver itself was wrong:

ClickHouse Java Client as DEFAULT_VALUE should be 'ClickHouse Java Client' as DEFAULT_VALUE (the string literal needs quoting).

The JDBC driver we were using was 0.3.2:
    <dependency>
    <groupId>ru.yandex.clickhouse</groupId>
    <artifactId>clickhouse-jdbc</artifactId>
    <version>0.3.2</version>
    </dependency>
Upgrading the clickhouse-jdbc dependency to 0.3.2-patch-1 fixes the problem.
You could also move to an even newer release; pick whichever suits you.


Background

In the test environment the old generation was taking up a lot of memory, so it needed investigating.

Process

How the problem appeared

During a load test a colleague noticed the old generation using a very large amount of memory, roughly as in the screenshot below:
(java_gc_generation screenshot)

Relevant configuration:

- JDK 11
- Maximum heap 2 GB
- GC: the default G1

Investigating

Dump file

Use the jmap command to dump the Java heap:

jmap -dump:format=b,live,file=<file-path> <pid>

Analyzing with MAT

Then download the MAT tool (from its download page), install it, open the dump, and click Dominator Tree:
(Dominator Tree screenshot)

There you can see the objects and their sizes.

(Dominator Tree detail screenshot)

There is a HashSet of roughly 900 MB. It is a deduplication set: a device id is inserted into it on every iteration, and once the loop ends the set is no longer referenced and will eventually be reclaimed by GC.

Conclusion

This is not a memory leak; it is simply a big set holding about 10 million entries, i.e. one very large object.

We asked ops to raise the maximum heap size, which solved the problem.

Follow-up optimization: instead of deduplicating by putting strings into a HashSet, we plan to deduplicate with a Bloom filter; a rough sketch follows.
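A minimal sketch of the planned change, assuming Guava is on the classpath (the expected-insertion count and false-positive rate are illustrative):

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;

public class DeviceDedup {
    // Sized for roughly 10 million device ids at a 1% false-positive rate;
    // this needs only a few MB instead of a ~900 MB HashSet of strings.
    private final BloomFilter<String> seen =
            BloomFilter.create(Funnels.stringFunnel(StandardCharsets.UTF_8), 10_000_000, 0.01);

    /** Returns true the first time an id is seen. */
    public boolean firstTime(String deviceId) {
        if (seen.mightContain(deviceId)) {
            return false; // probably a duplicate (may rarely be a false positive)
        }
        seen.put(deviceId);
        return true;
    }
}

The trade-off is that a Bloom filter can report false positives, so a small fraction of genuinely new device ids would be treated as duplicates.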


Background

While investigating a GC problem I needed to understand how much memory Java objects occupy.

For example, how many bytes does creating an array of 1099 objects like this take?

And what about a specific class, NumClass:
    public class NumClass{
    public int num1 ;
    public int num2 ;
    }

Then how many bytes does creating a NumClass[] occupy?
// How many bytes does this array occupy?
Object[] array1 = new Object[10245];
// And how many bytes does this one occupy?
Object[] array2 = new NumClass[10245];
The conclusion is that both occupy exactly the same number of bytes: the array stores only references (oops), not the objects themselves. This can be checked with JOL, as sketched below.
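A quick way to check this is JOL; a minimal sketch, assuming the org.openjdk.jol:jol-core dependency is available and NumClass is the class defined above:

import org.openjdk.jol.info.ClassLayout;

public class ArraySizeDemo {
    public static void main(String[] args) {
        Object[] plain = new Object[10245];
        Object[] typed = new NumClass[10245];

        // Both arrays store only references, so their layouts (and total sizes) match.
        System.out.println(ClassLayout.parseInstance(plain).toPrintable());
        System.out.println(ClassLayout.parseInstance(typed).toPrintable());
    }
}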

Object array example

Let's start with the simplest new Object[] code; this example creates an object array of length 10245:
    package com;
    public class Hello{
    public static volatile Object[] arr ;
    public static void main(String [] argv){
arr = new Object[10245]; // create an object array; the array itself is an object, so how big is it?
    arr[1] = "hihi";

    }
    }
On a 64-bit machine without compressed pointers this works out to 8 + 4 + 10245*8 bytes, i.e. the header word, a 4-byte length field, and one 8-byte oop per element.

oop is an alias for the pointer type oopDesc*:
    typedef class oopDesc*                    oop;
    typedef class instanceOopDesc* instanceOop;
    typedef class arrayOopDesc* arrayOop;
    typedef class objArrayOopDesc* objArrayOop;
    typedef class typeArrayOopDesc* typeArrayOop;
+-----------+----------+-------+-------+
| MarkWord  | length   | oop   | oop   |   ... the oop slot repeats 10245 times
+-----------+----------+-------+-------+

jol

Core function:
    jol\jol-core\src\main\java\org\openjdk\jol\layouters\CurrentLayouter.java

    @Override
    public ClassLayout layout(ClassData data) {
    VirtualMachine vm = VM.current();

    if (data.isArray()) {
    // special case of arrays
    int base = vm.arrayBaseOffset(data.arrayComponentType());
    int scale = vm.arrayIndexScale(data.arrayComponentType());

    long instanceSize = MathUtil.align(base + data.arrayLength() * scale, vm.objectAlignment());

    SortedSet<FieldLayout> result = new TreeSet<>();
    result.add(new FieldLayout(FieldData.create(data.arrayClass(), "<elements>", data.arrayComponentType()), base, scale * data.arrayLength()));
    return ClassLayout.create(data, result, CURRENT, instanceSize, false);
    }

    Collection<FieldData> fields = data.fields();

    SortedSet<FieldLayout> result = new TreeSet<>();
    for (FieldData f : fields) {
    result.add(new FieldLayout(f, vm.fieldOffset(f.refField()), vm.sizeOfField(f.typeClass())));
    }

    long instanceSize;
    if (result.isEmpty()) {
    instanceSize = vm.objectHeaderSize();
    } else {
    FieldLayout f = result.last();
    instanceSize = f.offset() + f.size();
    // TODO: This calculation is incorrect if there is a trailing @Contended field, or the instance is @Contended
    }
    instanceSize = MathUtil.align(instanceSize, vm.objectAlignment());
    return ClassLayout.create(data, result, CURRENT, instanceSize, true);
    }
And the HotSpot code for the offset of the array length field:
    static int length_offset_in_bytes() {
    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
    sizeof(arrayOopDesc);
    }
Creating an array
    (gdb) bt
    #0 arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
    #1 0x00007f93c2fdc06e in arrayOopDesc::length_addr_impl (obj_ptr=0x7162010c0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:67
    #2 0x00007f93c2fdc0ba in arrayOopDesc::length (this=0x7162010c0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:114
    #3 0x00007f93c3239521 in arrayOopDesc::is_within_bounds (this=0x7162010c0, index=10) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:110
    #4 0x00007f93c3239690 in typeArrayOopDesc::byte_at_addr (this=0x7162010c0, which=10) at /var/jdk/src/hotspot/share/oops/typeArrayOop.inline.hpp:48
    #5 0x00007f93c323972d in typeArrayOopDesc::byte_at_put (this=0x7162010c0, which=10, contents=112 'p') at /var/jdk/src/hotspot/share/oops/typeArrayOop.inline.hpp:96
    #6 0x00007f93c389e259 in java_lang_String::create_from_unicode (unicode=0x7f93bc037c88, length=33, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:300
    #7 0x00007f93c3f7b91d in StringTable::do_intern (string_or_null_h=..., name=0x7f93bc037c88, len=33, hash=2694772907, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:347
    #8 0x00007f93c3f7b87e in StringTable::intern (string_or_null_h=..., name=0x7f93bc037c88, len=33, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:336
    #9 0x00007f93c3f7b5b0 in StringTable::intern (symbol=0x7f93c0018d38, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/stringTable.cpp:296
    #10 0x00007f93c35740b5 in ConstantPool::uncached_string_at (this=0x7f938c02e0a0, which=250, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/oops/constantPool.cpp:1171
    #11 0x00007f93c36a19cd in fieldDescriptor::string_initial_value (this=0x7f93c2bd3a68, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/runtime/fieldDescriptor.cpp:103
    #12 0x00007f93c389ff2d in initialize_static_string_field (fd=0x7f93c2bd3a68, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:809
    #13 0x00007f93c38a0444 in initialize_static_field (fd=0x7f93c2bd3a68, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:866
    #14 0x00007f93c3869f51 in InstanceKlass::do_local_static_fields (this=0x8000431a0, f=0x7f93c38a0365 <initialize_static_field(fieldDescriptor*, Handle, JavaThread*)>, mirror=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/oops/instanceKlass.cpp:1672
    #15 0x00007f93c38a08d5 in java_lang_Class::initialize_mirror_fields (k=0x8000431a0, mirror=..., protection_domain=..., classData=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:930
    #16 0x00007f93c38a10d1 in java_lang_Class::create_mirror (k=0x8000431a0, class_loader=..., module=..., protection_domain=..., classData=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/javaClasses.cpp:1035
    #17 0x00007f93c34c6144 in ClassFileParser::fill_instance_klass (this=0x7f93c2bd3dd0, ik=0x8000431a0, changed_by_loadhook=false, cl_inst_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classFileParser.cpp:5426
    #18 0x00007f93c34c532e in ClassFileParser::create_instance_klass (this=0x7f93c2bd3dd0, changed_by_loadhook=false, cl_inst_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classFileParser.cpp:5255
    #19 0x00007f93c3b554d1 in KlassFactory::create_from_stream (stream=0x7f93bc036fb0, name=0x7f93c00001f8, loader_data=0x7f93bc121170, cl_info=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/klassFactory.cpp:202
    #20 0x00007f93c34d86e9 in ClassLoader::load_class (name=0x7f93c00001f8, search_append_only=false, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/classLoader.cpp:1231
    #21 0x00007f93c4000806 in SystemDictionary::load_instance_class_impl (class_name=0x7f93c00001f8, class_loader=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1289
    #22 0x00007f93c4000ba3 in SystemDictionary::load_instance_class (name_hash=2036240099, name=0x7f93c00001f8, class_loader=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1354
    #23 0x00007f93c3ffeca9 in SystemDictionary::resolve_instance_class_or_null (name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:723
    #24 0x00007f93c3ffd82e in SystemDictionary::resolve_instance_class_or_null_helper (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:294
    #25 0x00007f93c3ffd6d4 in SystemDictionary::resolve_or_null (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:277
    #26 0x00007f93c3ffd617 in SystemDictionary::resolve_or_fail (class_name=0x7f93c00001f8, class_loader=..., protection_domain=..., throw_error=true, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:263
    #27 0x00007f93c32b8d98 in SystemDictionary::resolve_or_fail (class_name=0x7f93c00001f8, throw_error=true, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.hpp:100
    #28 0x00007f93c40dca98 in vmClasses::resolve (id=vmClassID::Throwable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:99
    #29 0x00007f93c40dcb96 in vmClasses::resolve_until (limit_id=vmClassID::SoftReference_klass_knum, start_id=@0x7f93c2bd48f0: vmClassID::Cloneable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:108
    #30 0x00007f93c40dd59a in vmClasses::resolve_through (last_id=vmClassID::Reference_klass_knum, start_id=@0x7f93c2bd48f0: vmClassID::Cloneable_klass_knum, __the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.hpp:64
    #31 0x00007f93c40dce23 in vmClasses::resolve_all (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/vmClasses.cpp:168
    #32 0x00007f93c4001ab2 in SystemDictionary::initialize (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/classfile/systemDictionary.cpp:1655
    #33 0x00007f93c40812fb in Universe::genesis (__the_thread__=0x7f93bc028fa0) at /var/jdk/src/hotspot/share/memory/universe.cpp:335
    #34 0x00007f93c408378f in universe2_init () at /var/jdk/src/hotspot/share/memory/universe.cpp:937
    #35 0x00007f93c3863d8a in init_globals () at /var/jdk/src/hotspot/share/runtime/init.cpp:132
    #36 0x00007f93c404a1ca in Threads::create_vm (args=0x7f93c2bd4d50, canTryAgain=0x7f93c2bd4c5b) at /var/jdk/src/hotspot/share/runtime/thread.cpp:2843
    #37 0x00007f93c396f43b in JNI_CreateJavaVM_inner (vm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, args=0x7f93c2bd4d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3613
    #38 0x00007f93c396f787 in JNI_CreateJavaVM (vm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, args=0x7f93c2bd4d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:3701
    #39 0x00007f93c50e6a6a in InitializeJVM (pvm=0x7f93c2bd4da8, penv=0x7f93c2bd4db0, ifn=0x7f93c2bd4e00) at /var/jdk/src/java.base/share/native/libjli/java.c:1459
    #40 0x00007f93c50e35ec in JavaMain (_args=0x7ffedd44e1a0) at /var/jdk/src/java.base/share/native/libjli/java.c:411
    #41 0x00007f93c50ea5ec in ThreadJavaMain (args=0x7ffedd44e1a0) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #42 0x00007f93c4f45b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #43 0x00007f93c4fd6bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
Computing the array size:
      static int array_size(int length) {
    const uint OopsPerHeapWord = HeapWordSize/heapOopSize;
    assert(OopsPerHeapWord >= 1 && (HeapWordSize % heapOopSize == 0),
    "Else the following (new) computation would be in error");
    uint res = ((uint)length + OopsPerHeapWord - 1)/OopsPerHeapWord;
    #ifdef ASSERT
    // The old code is left in for sanity-checking; it'll
    // go away pretty soon. XXX
    // Without UseCompressedOops, this is simply:
    // oop->length() * HeapWordsPerOop;
    // With narrowOops, HeapWordsPerOop is 1/2 or equal 0 as an integer.
    // The oop elements are aligned up to wordSize
    const uint HeapWordsPerOop = heapOopSize/HeapWordSize;
    uint old_res;
    if (HeapWordsPerOop > 0) {
    old_res = length * HeapWordsPerOop;
    } else {
    old_res = align_up((uint)length, OopsPerHeapWord)/OopsPerHeapWord;
    }
    assert(res == old_res, "Inconsistency between old and new.");
    #endif // ASSERT
    return res;
    }
    Thread 2 "java" hit Breakpoint 5, jni_invoke_static (env=0x7fca48029310, result=0x7fca4c534bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7fca48542d50, args=0x7fca4c534c60, __the_thread__=0x7fca48029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
    881 args->push_arguments_on(&java_args);
    (gdb) p method._value->print()
    {method}
    - this oop: 0x00007fca14411240
    - method holder: 'com/Hello'
    - constants: 0x00007fca14411020 constant pool [20] {0x00007fca14411020} for 'com/Hello' cache=0x00007fca14411348
    - access: 0x9 public static
    - name: 'main'
    - signature: '([Ljava/lang/String;)V'
    - max stack: 2
    - max locals: 1
    - size of params: 1
    - method size: 13
    - vtable index: -2
    - i2i entry: 0x00007fca3900dc00
    - adapters: AHE@0x00007fca4812b8d0: 0xb i2c: 0x00007fca39114d60 c2i: 0x00007fca39114e1a c2iUV: 0x00007fca39114de4 c2iNCI: 0x00007fca39114e57
    - compiled entry 0x00007fca39114e1a
    - code size: 10
    - code start: 0x00007fca14411230
    - code end (excl): 0x00007fca1441123a
    - checked ex length: 0
    - linenumber start: 0x00007fca1441123a
    - localvar length: 0
    $17 = void
    (gdb) bt
    #0 jni_invoke_static (env=0x7fca48029310, result=0x7fca4c534bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7fca48542d50, args=0x7fca4c534c60, __the_thread__=0x7fca48029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
    #1 0x00007fca4d2c141c in jni_CallStaticVoidMethod (env=0x7fca48029310, cls=0x7fca4802b368, methodID=0x7fca48542d50) at /var/jdk/src/hotspot/share/prims/jni.cpp:1710
    #2 0x00007fca4ea4415e in JavaMain (_args=0x7fff1ad56b60) at /var/jdk/src/java.base/share/native/libjli/java.c:545
    #3 0x00007fca4ea4a5ec in ThreadJavaMain (args=0x7fff1ad56b60) at /var/jdk/src/java.base/unix/native/libjli/java_md.c:651
    #4 0x00007fca4e8a5b43 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442
    #5 0x00007fca4e936bb4 in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:100
    Thread 2 "java" hit Breakpoint 5, jni_invoke_static (env=0x7f8674029310, result=0x7f86791b0bf0, receiver=0x0, call_type=JNI_STATIC, method_id=0x7f8674652560, args=0x7f86791b0c60, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/prims/jni.cpp:881
    881 args->push_arguments_on(&java_args);
    (gdb) p method._value->print()
    {method}
    - this oop: 0x00007f8644411240
    - method holder: 'com/Hello'
    - constants: 0x00007f8644411020 constant pool [20] {0x00007f8644411020} for 'com/Hello' cache=0x00007f8644411348
    - access: 0x9 public static
    - name: 'main'
    - signature: '([Ljava/lang/String;)V'
    - max stack: 2
    - max locals: 1
    - size of params: 1
    - method size: 13
    - vtable index: -2
    - i2i entry: 0x00007f866500dc00
    - adapters: AHE@0x00007f867412b8d0: 0xb i2c: 0x00007f8665114d60 c2i: 0x00007f8665114e1a c2iUV: 0x00007f8665114de4 c2iNCI: 0x00007f8665114e57
    - compiled entry 0x00007f8665114e1a
    - code size: 10
    - code start: 0x00007f8644411230
    - code end (excl): 0x00007f864441123a
    - checked ex length: 0
    - linenumber start: 0x00007f864441123a
    - localvar length: 0
    $23 = void
    (gdb) enable 1
    (gdb) c
    Continuing.

    Thread 2 "java" hit Breakpoint 1, oopFactory::new_objArray (klass=0x800040f80, length=2019, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/memory/oopFactory.cpp:118
    118 assert(klass->is_klass(), "must be instance class");
    (gdb) bt
    #0 oopFactory::new_objArray (klass=0x800040f80, length=2019, __the_thread__=0x7f8674029030) at /var/jdk/src/hotspot/share/memory/oopFactory.cpp:118
    #1 0x00007f8679e68a5b in InterpreterRuntime::anewarray (current=0x7f8674029030, pool=0x7f8644411020, index=2, size=2019) at /var/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:254
    #2 0x00007f8665024083 in ?? ()
    #3 0x00007f867b4520a0 in TemplateInterpreter::_active_table () from /var/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #4 0x00007f8665024002 in ?? ()
    #5 0x00007f86791b07b0 in ?? ()
    #6 0x00007f8644411233 in ?? ()
    #7 0x00007f86791b0808 in ?? ()
    #8 0x00007f8644411348 in ?? ()
    #9 0x0000000000000000 in ?? ()
    Bottom (innermost) frame selected; you cannot go down.
    (gdb) p _do_zero
    $35 = true
    (gdb) n
    413 if (_do_zero) {
    (gdb) n
    414 mem_clear(mem);
    (gdb) n
    416 arrayOopDesc::set_length(mem, _length);
    (gdb) l
    411 // concurrent GC.
    412 assert(_length >= 0, "length should be non-negative");
    413 if (_do_zero) {
    414 mem_clear(mem);
    415 }
    416 arrayOopDesc::set_length(mem, _length);
    417 return finish(mem);
    418 }
    419
    420 oop ClassAllocator::initialize(HeapWord* mem) const {
    (gdb) up
    #1 0x00007f867a27af4b in MemAllocator::allocate (this=0x7f86791b0650) at /var/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:365
    365 obj = initialize(mem);
    (gdb) down
    #0 ObjArrayAllocator::initialize (this=0x7f86791b0650, mem=0x715e73dd0) at /var/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:416
    416 arrayOopDesc::set_length(mem, _length);
    (gdb) s
    arrayOopDesc::set_length (mem=0x715e73dd0, length=2019) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:122
    122 *length_addr_impl(mem) = length;
    (gdb) s
    arrayOopDesc::length_addr_impl (obj_ptr=0x715e73dd0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:66
    66 char* ptr = static_cast<char*>(obj_ptr);
    (gdb) l
    61 return (int)hs;
    62 }
    63
    64 // Returns the address of the length "field". See length_offset_in_bytes().
    65 static int* length_addr_impl(void* obj_ptr) {
    66 char* ptr = static_cast<char*>(obj_ptr);
    67 return reinterpret_cast<int*>(ptr + length_offset_in_bytes());
    68 }
    69
    70 // Check whether an element of a typeArrayOop with the given type must be
    (gdb) n
    67 return reinterpret_cast<int*>(ptr + length_offset_in_bytes());
    (gdb) s
    arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
    83 sizeof(arrayOopDesc);
    (gdb) l
    78 // The _length field is not declared in C++. It is allocated after the
    79 // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
    80 // it occupies the second half of the _klass field in oopDesc.
    81 static int length_offset_in_bytes() {
    82 return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
    83 sizeof(arrayOopDesc);
    84 }
    85
    86 // Returns the offset of the first element.
    87 static int base_offset_in_bytes(BasicType type) {
    (gdb) p Use
    Display all 161 possibilities? (y or n)
    (gdb) p UseCompressedClassPointers
    $36 = true
    (gdb) s
    82 return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
    (gdb) s
    oopDesc::klass_gap_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/oop.hpp:307
    307 assert(has_klass_gap(), "only applicable to compressed klass pointers");
    (gdb) l
    302
    303 // for code generation
    304 static int mark_offset_in_bytes() { return offset_of(oopDesc, _mark); }
    305 static int klass_offset_in_bytes() { return offset_of(oopDesc, _metadata._klass); }
    306 static int klass_gap_offset_in_bytes() {
    307 assert(has_klass_gap(), "only applicable to compressed klass pointers");
    308 return klass_offset_in_bytes() + sizeof(narrowKlass);
    309 }
    310
    311 // for error reporting
    (gdb) n
    oopDesc::klass_gap_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/oop.hpp:308
    308 return klass_offset_in_bytes() + sizeof(narrowKlass);
    (gdb) n
    309 }
    (gdb) n
    arrayOopDesc::length_offset_in_bytes () at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:83
    83 sizeof(arrayOopDesc);
    (gdb) n
    84 }
    (gdb) n
    arrayOopDesc::length_addr_impl (obj_ptr=0x715e73dd0) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:68
    68 }
    (gdb) n
    arrayOopDesc::set_length (mem=0x715e73dd0, length=2019) at /var/jdk/src/hotspot/share/oops/arrayOop.hpp:122
    122 *length_addr_impl(mem) = length;
    (gdb) n
    123 }
    (gdb) p (int *)mem@20
    Only values in memory can be extended with '@'.
    (gdb) p *(int *)mem@20
    $37 = {-1163019586, -1163019586, -1163019586, 2019, 0 <repeats 16 times>}
Bytecode generation entry:
    // jdk/src/hotspot/share/interpreter/templateInterpreter.cpp
    void DispatchTable::set_entry(int i, EntryPoint& entry) {
    assert(0 <= i && i < length, "index out of bounds");
    assert(number_of_states == 10, "check the code below");
    _table[btos][i] = entry.entry(btos);
    _table[ztos][i] = entry.entry(ztos);
    _table[ctos][i] = entry.entry(ctos);
    _table[stos][i] = entry.entry(stos);
    _table[atos][i] = entry.entry(atos);
    _table[itos][i] = entry.entry(itos);
    _table[ltos][i] = entry.entry(ltos);
    _table[ftos][i] = entry.entry(ftos);
    _table[dtos][i] = entry.entry(dtos);
    _table[vtos][i] = entry.entry(vtos);
    }
    //src/hotspot/share/interpreter/bytecodes.hpp
    _new = 187, // 0xbb
    _newarray = 188, // 0xbc
    _anewarray = 189, // 0xbd
    _arraylength = 190, // 0xbe
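To connect this to the Java side, compiling the Hello class from earlier and disassembling it with javap shows where anewarray (0xbd) comes from; a sketch with the expected, abridged output in comments (exact constant-pool indexes will differ):

public class Hello2 {
    public static volatile Object[] arr;

    public static void main(String[] args) {
        // javac Hello2.java && javap -c Hello2 prints, roughly:
        //   sipush        10245
        //   anewarray     #N   // class java/lang/Object   <- bytecode 189 (0xbd) above
        //   putstatic     #M   // Field arr:[Ljava/lang/Object;
        arr = new Object[10245];
    }
}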
Call stack:
    Thread 2 "java" hit Breakpoint 13, 0x00007fffe1011b13 in ?? ()
    (gdb) x/20i $pc
    => 0x7fffe1011b13: movzwl 0x1(%r13),%eax
    0x7fffe1011b18: bswap %eax
    0x7fffe1011b1a: sar $0x10,%eax
    0x7fffe1011b1d: movzbl 0x3(%r13),%ebx
    0x7fffe1011b22: add $0x3,%r13
    0x7fffe1011b26: movabs $0x7ffff7bca0a0,%r10
    0x7fffe1011b30: jmp *(%r10,%rbx,8)
    0x7fffe1011b34: nop
    0x7fffe1011b35: nop
    0x7fffe1011b36: nop
    0x7fffe1011b37: nop
    0x7fffe1011b38: int3
    0x7fffe1011b39: int3
    0x7fffe1011b3a: int3
    0x7fffe1011b3b: int3
    0x7fffe1011b3c: int3
    0x7fffe1011b3d: int3
    0x7fffe1011b3e: int3
    0x7fffe1011b3f: int3
    0x7fffe1011b40: and %al,(%rax,%rax,1)

    0x00007fffe1011b1d in ?? ()
    (gdb) info registers
    rax 0x2805 10245
    rbx 0x11 17
    rcx 0x2 2
    rdx 0x8 8
    rsi 0x555555581230 93824992416304
    rdi 0x7ffff0028f70 140737220087664
    rbp 0x7ffff59fe7f8 0x7ffff59fe7f8
    rsp 0x7ffff59fe7b0 0x7ffff59fe7b0
    r8 0x8 8
    r9 0x0 0
    r10 0x7ffff7bcc8a0 140737349732512
    r11 0x216 534
    r12 0x0 0
    r13 0x7fffb4411230 140736217551408
    r14 0x7ffff59fe808 140737314285576
    r15 0x7ffff0028f70 140737220087664
    rip 0x7fffe1011b1d 0x7fffe1011b1d
    eflags 0x216 [ PF AF IF ]
    cs 0x33 51
    ss 0x2b 43
    ds 0x0 0
    es 0x0 0
    fs 0x0 0
    gs 0x0 0

anewarray assembly:
    (gdb) x/20i $pc
    => 0x7fffe102400a: lea 0x8(%rsp),%rax
    0x7fffe102400f: mov %r13,-0x40(%rbp)
    0x7fffe1024013: cmpq $0x0,-0x10(%rbp)
    0x7fffe102401b: je 0x7fffe1024035
    0x7fffe1024021: movabs $0x7ffff71becc8,%rdi
    0x7fffe102402b: and $0xfffffffffffffff0,%rsp
    0x7fffe102402f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1024034: hlt
    0x7fffe1024035: push %r10
    0x7fffe1024037: cmp 0x16ae2ec2(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
    0x7fffe102403e: je 0x7fffe1024058
    0x7fffe1024044: movabs $0x7ffff7311c28,%rdi
    0x7fffe102404e: and $0xfffffffffffffff0,%rsp
    0x7fffe1024052: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1024057: hlt
    0x7fffe1024058: pop %r10
    0x7fffe102405a: mov %r15,%rdi
    0x7fffe102405d: vzeroupper
    0x7fffe1024060: mov %rbp,0x2d0(%r15)
    0x7fffe1024067: mov %rax,0x2c0(%r15)
    (gdb) x/200i $pc
    => 0x7fffe102400a: lea 0x8(%rsp),%rax
    0x7fffe102400f: mov %r13,-0x40(%rbp)
    0x7fffe1024013: cmpq $0x0,-0x10(%rbp)
    0x7fffe102401b: je 0x7fffe1024035
    0x7fffe1024021: movabs $0x7ffff71becc8,%rdi
    0x7fffe102402b: and $0xfffffffffffffff0,%rsp
    0x7fffe102402f: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1024034: hlt
    0x7fffe1024035: push %r10
    0x7fffe1024037: cmp 0x16ae2ec2(%rip),%r12 # 0x7ffff7b06f00 <_ZN14CompressedOops11_narrow_oopE>
    0x7fffe102403e: je 0x7fffe1024058
    0x7fffe1024044: movabs $0x7ffff7311c28,%rdi
    0x7fffe102404e: and $0xfffffffffffffff0,%rsp
    0x7fffe1024052: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe1024057: hlt
    0x7fffe1024058: pop %r10
    0x7fffe102405a: mov %r15,%rdi
    0x7fffe102405d: vzeroupper
    0x7fffe1024060: mov %rbp,0x2d0(%r15)
    0x7fffe1024067: mov %rax,0x2c0(%r15)
    0x7fffe102406e: test $0xf,%esp
    0x7fffe1024074: je 0x7fffe102408c
    0x7fffe102407a: sub $0x8,%rsp
    0x7fffe102407e: call 0x7ffff65cf968 <_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP12ConstantPoolii>
    0x7fffe1024083: add $0x8,%rsp
    0x7fffe1024087: jmp 0x7fffe1024091
    0x7fffe102408c: call 0x7ffff65cf968 <_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP12ConstantPoolii>
    0x7fffe1024091: push %rax
    0x7fffe1024092: push %rdi
    0x7fffe1024093: push %rsi
    0x7fffe1024094: push %rdx
    --Type <RET> for more, q to quit, c to continue without paging--
    0x7fffe1024095: push %rcx
    0x7fffe1024096: push %r8
    0x7fffe1024098: push %r9
    0x7fffe102409a: push %r10
    0x7fffe102409c: push %r11
    0x7fffe102409e: test $0xf,%esp
    0x7fffe10240a4: je 0x7fffe10240bc
    0x7fffe10240aa: sub $0x8,%rsp
    0x7fffe10240ae: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe10240b3: add $0x8,%rsp
    0x7fffe10240b7: jmp 0x7fffe10240c1
    0x7fffe10240bc: call 0x7ffff5d1c04e <_ZN6Thread7currentEv>
    0x7fffe10240c1: pop %r11
    0x7fffe10240c3: pop %r10
    0x7fffe10240c5: pop %r9
    0x7fffe10240c7: pop %r8
    0x7fffe10240c9: pop %rcx
    0x7fffe10240ca: pop %rdx
    0x7fffe10240cb: pop %rsi
    0x7fffe10240cc: pop %rdi
    0x7fffe10240cd: cmp %rax,%r15
    0x7fffe10240d0: je 0x7fffe10240ea
    0x7fffe10240d6: movabs $0x7ffff7311da0,%rdi
    0x7fffe10240e0: and $0xfffffffffffffff0,%rsp
    0x7fffe10240e4: call 0x7ffff694f3c0 <_ZN14MacroAssembler7debug64EPclPl>
    0x7fffe10240e9: hlt
    0x7fffe10240ea: pop %rax
    0x7fffe10240eb: movq $0x0,0x2c0(%r15)
    0x7fffe10240f6: movq $0x0,0x2d0(%r15)
    0x7fffe1024101: movq $0x0,0x2c8(%r15)
    0x7fffe102410c: vzeroupper
    --Type <RET> for more, q to quit, c to continue without paging--
    0x7fffe102410f: cmpq $0x0,0x8(%r15)
    0x7fffe1024117: je 0x7fffe1024122
    0x7fffe102411d: jmp 0x7fffe1000c20
    0x7fffe1024122: mov 0x318(%r15),%rax
    0x7fffe1024129: movq $0x0,0x318(%r15)
    0x7fffe1024134: mov -0x40(%rbp),%r13
    0x7fffe1024138: mov -0x38(%rbp),%r14
    0x7fffe102413c: ret
    0x7fffe102413d: movzbl 0x3(%r13),%ebx
    0x7fffe1024142: add $0x3,%r13
    0x7fffe1024146: movabs $0x7ffff7bcc0a0,%r10
    0x7fffe1024150: jmp *(%r10,%rbx,8)
    0x7fffe1024154: nop
    0x7fffe1024155: nop


Memory allocation

This is JDK 11+, and this JVM uses G1 for GC, so let's look at how G1 allocates the memory:
    (gdb) bt
    #0 HeapRegion::par_allocate_impl (this=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020, actual_size=0x7ffff59fcba8)
    at /home/dai/jdk/src/hotspot/share/gc/g1/heapRegion.inline.hpp:63
    #1 0x00007ffff640bdcc in HeapRegion::par_allocate (this=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
    at /home/dai/jdk/src/hotspot/share/gc/g1/heapRegion.inline.hpp:225
    #2 0x00007ffff640bfcb in G1AllocRegion::par_allocate (this=0x7ffff0052e10, alloc_region=0x7ffff00e11b0, min_word_size=256, desired_word_size=63020,
    actual_word_size=0x7ffff59fcba8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp:63
    #3 0x00007ffff640c0c6 in G1AllocRegion::attempt_allocation (this=0x7ffff0052e10, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
    at /home/dai/jdk/src/hotspot/share/gc/g1/g1AllocRegion.inline.hpp:77
    #4 0x00007ffff6447142 in G1Allocator::attempt_allocation (this=0x7ffff0052d50, min_word_size=256, desired_word_size=63020, actual_word_size=0x7ffff59fcba8)
    at /home/dai/jdk/src/hotspot/share/gc/g1/g1Allocator.inline.hpp:62
    #5 0x00007ffff6447b1d in G1CollectedHeap::attempt_allocation (this=0x7ffff0048bf0, min_word_size=256, desired_word_size=63020,
    actual_word_size=0x7ffff59fcba8) at /home/dai/jdk/src/hotspot/share/gc/g1/g1CollectedHeap.cpp:709
    #6 0x00007ffff64385ea in G1CollectedHeap::allocate_new_tlab (this=0x7ffff0048bf0, min_size=256, requested_size=63020, actual_size=0x7ffff59fcba8)
    at /home/dai/jdk/src/hotspot/share/gc/g1/g1CollectedHeap.cpp:359
    #7 0x00007ffff69e1cf6 in MemAllocator::allocate_inside_tlab_slow (this=0x7ffff59fcc00, allocation=...)
    at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:318
    #8 0x00007ffff69e1bc2 in MemAllocator::allocate_inside_tlab (this=0x7ffff59fcc00, allocation=...)
    at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:278
    #9 0x00007ffff69e1eb9 in MemAllocator::mem_allocate (this=0x7ffff59fcc00, allocation=...) at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:350
    #10 0x00007ffff69e1f22 in MemAllocator::allocate (this=0x7ffff59fcc00) at /home/dai/jdk/src/hotspot/share/gc/shared/memAllocator.cpp:363
    #11 0x00007ffff6260d84 in CollectedHeap::array_allocate (this=0x7ffff0048bf0, klass=0x8000407c0, size=106, length=825, do_zero=true,
    __the_thread__=0x7ffff0028f70) at /home/dai/jdk/src/hotspot/share/gc/shared/collectedHeap.inline.hpp:41
    #12 0x00007ffff6db9bf2 in TypeArrayKlass::allocate_common (this=0x8000407c0, length=825, do_zero=true, __the_thread__=0x7ffff0028f70)
    at /home/dai/jdk/src/hotspot/share/oops/typeArrayKlass.cpp:93
    #13 0x00007ffff62f7428 in TypeArrayKlass::allocate (this=0x8000407c0, length=825, __the_thread__=0x7ffff0028f70)
    at /home/dai/jdk/src/hotspot/share/oops/typeArrayKlass.hpp:68
    #14 0x00007ffff6ab4757 in oopFactory::new_typeArray (type=T_BYTE, length=825, __the_thread__=0x7ffff0028f70)
    at /home/dai/jdk/src/hotspot/share/memory/oopFactory.cpp:93
    #15 0x00007ffff65cf8e5 in InterpreterRuntime::newarray (current=0x7ffff0028f70, type=T_BYTE, size=825)
    at /home/dai/jdk/src/hotspot/share/interpreter/interpreterRuntime.cpp:247
    #16 0x00007fffe1023eb2 in ?? ()
    #17 0x00007ffff7bca0a0 in TemplateInterpreter::_active_table () from /home/dai/jdk/build/linux-x86_64-server-slowdebug/jdk/lib/server/libjvm.so
    #18 0x00007fffe1023e31 in ?? ()
    #19 0x000000062a47ab38 in ?? ()
    #20 0x00007ffff59fcd88 in ?? ()
    #21 0x00007fffb43a23e6 in ?? ()
    #22 0x00007ffff59fcde8 in ?? ()
    #23 0x00007fffb43a3520 in ?? ()
    #24 0x0000000000000000 in ?? ()

Working backwards from array copy

In Java, System.arraycopy copies one array into another, and the copy is shallow. It eventually lands in the C++ entry point below, so this function is another way to understand the array memory layout (a small Java example of the shallow-copy behaviour follows the snippet).
    void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
    int dst_pos, int length, TRAPS) {


    ...
size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos); // <--- source offset
size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos); // <--- destination offset
    assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
    objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
    assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
    objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
    do_copy(s, src_offset, d, dst_offset, length, CHECK);
    }
    }
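As mentioned above, a minimal Java sketch showing that System.arraycopy copies references rather than the referenced objects:

public class ShallowCopyDemo {
    public static void main(String[] args) {
        StringBuilder shared = new StringBuilder("a");
        Object[] src = { shared, new StringBuilder("b") };
        Object[] dst = new Object[2];

        // Copies the two references; no StringBuilder is duplicated.
        System.arraycopy(src, 0, dst, 0, src.length);

        shared.append("!");                    // mutate via the original reference
        System.out.println(dst[0]);            // prints "a!" -> dst[0] is the same object
        System.out.println(src[0] == dst[0]);  // true
    }
}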

So the memory layout of a Java object array is as in the diagram earlier: an object header, the length field, and then one oop per element, where an oop is just a pointer.

On 64-bit, without compressed oops, each oop is 8 bytes.

Related reading


Background

Our Java Redis access is wrapped with Redisson.

A simple call stack

Usage
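A minimal standalone sketch (the address and key are illustrative) that triggers the RedissonBucket.set path traced below; the stack traces in this post come from a cluster configuration, but a single-node config keeps the sketch short:

import org.redisson.Redisson;
import org.redisson.api.RBucket;
import org.redisson.api.RedissonClient;
import org.redisson.config.Config;

public class RedissonDemo {
    public static void main(String[] args) {
        Config config = new Config();
        config.useSingleServer().setAddress("redis://127.0.0.1:6379");

        RedissonClient client = Redisson.create(config);
        RBucket<String> bucket = client.getBucket("demo-key");
        bucket.set("demo-value");   // ends up in RedissonBucket.set -> CommandEncoder.encode
        client.shutdown();
    }
}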

The core part of the call stack:
    encode:132, CommandEncoder (org.redisson.client.handler)
    encode:99, CommandEncoder (org.redisson.client.handler)
    encode:55, CommandEncoder (org.redisson.client.handler)
    write:107, MessageToByteEncoder (io.netty.handler.codec)
    write:75, CommandEncoder (org.redisson.client.handler)
    invokeWrite0:881, AbstractChannelHandlerContext (io.netty.channel)
    invokeWrite:863, AbstractChannelHandlerContext (io.netty.channel)
    write:968, AbstractChannelHandlerContext (io.netty.channel)
    write:856, AbstractChannelHandlerContext (io.netty.channel)
    write:120, MessageToByteEncoder (io.netty.handler.codec)
    write:45, CommandBatchEncoder (org.redisson.client.handler)
    invokeWrite0:881, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
    write:966, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    write:83, CommandsQueue (org.redisson.client.handler)
    invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
    run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
    runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
    runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
    - Async stack trace
    addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
    safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
    write:972, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
    writeAndFlush:306, AbstractChannel (io.netty.channel)
    send:206, RedisConnection (org.redisson.client)
    sendCommand:590, RedisExecutor (org.redisson.command)
    lambda$execute$3:164, RedisExecutor (org.redisson.command)
    uniWhenComplete:859, CompletableFuture (java.util.concurrent)
    uniWhenCompleteStage:883, CompletableFuture (java.util.concurrent)
    whenComplete:2251, CompletableFuture (java.util.concurrent)
    execute:149, RedisExecutor (org.redisson.command)
    async:526, CommandAsyncService (org.redisson.command)
    writeAsync:490, CommandAsyncService (org.redisson.command)
    setAsync:192, RedissonBucket (org.redisson)
    set:183, RedissonBucket (org.redisson)
    main:25, App (com.demo.redission)
Allocating memory
    allocateMemory:608, Unsafe (jdk.internal.misc)
    <init>:122, DirectByteBuffer (java.nio)
    allocateDirect:317, ByteBuffer (java.nio)
    <clinit>:93, PlatformDependent0 (io.netty.util.internal)
    isAndroid:333, PlatformDependent (io.netty.util.internal)
    <clinit>:88, PlatformDependent (io.netty.util.internal)
    <clinit>:84, NioEventLoop (io.netty.channel.nio)
    newChild:182, NioEventLoopGroup (io.netty.channel.nio)
    newChild:38, NioEventLoopGroup (io.netty.channel.nio)
    <init>:84, MultithreadEventExecutorGroup (io.netty.util.concurrent)
    <init>:60, MultithreadEventExecutorGroup (io.netty.util.concurrent)
    <init>:49, MultithreadEventExecutorGroup (io.netty.util.concurrent)
    <init>:59, MultithreadEventLoopGroup (io.netty.channel)
    <init>:87, NioEventLoopGroup (io.netty.channel.nio)
    <init>:82, NioEventLoopGroup (io.netty.channel.nio)
    <init>:69, NioEventLoopGroup (io.netty.channel.nio)
    <init>:181, MasterSlaveConnectionManager (org.redisson.connection)
    <init>:73, ClusterConnectionManager (org.redisson.cluster)
    createConnectionManager:196, ConfigSupport (org.redisson.config)
    <init>:68, Redisson (org.redisson)
    create:109, Redisson (org.redisson)
    main:24, App (com.demo.redission)
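    The interesting part of this trace is that merely class-initializing Netty's PlatformDependent0 allocates a direct ByteBuffer, which bottoms out in Unsafe.allocateMemory. A minimal, JDK-only sketch of the same allocation path:

    import java.nio.ByteBuffer;

    public class DirectAllocDemo {
        public static void main(String[] args) {
            // ByteBuffer.allocateDirect -> new DirectByteBuffer -> Unsafe.allocateMemory,
            // the same native-memory path seen at the top of the stack above.
            ByteBuffer buf = ByteBuffer.allocateDirect(1024);
            System.out.println(buf.isDirect() + " capacity=" + buf.capacity());
        }
    }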

    ByteBuf write

    Stack trace:

    putByte:704, PlatformDependent0 (io.netty.util.internal)
    putByte:719, PlatformDependent (io.netty.util.internal)
    unsafeWriteUtf8:1020, ByteBufUtil (io.netty.buffer)
    writeUtf8:836, ByteBufUtil (io.netty.buffer)
    writeUtf8:820, ByteBufUtil (io.netty.buffer)
    setCharSequence0:707, AbstractByteBuf (io.netty.buffer)
    writeCharSequence:1187, AbstractByteBuf (io.netty.buffer)
    encode:45, StringCodec$1 (org.redisson.client.codec)
    encode:622, CommandAsyncService (org.redisson.command)
    encode:313, RedissonObject (org.redisson)
    setAsync:192, RedissonBucket (org.redisson)
    set:183, RedissonBucket (org.redisson)
    testApp:57, AppTest (com.demo.redission)
    invokeVirtual:-1, LambdaForm$DMH/0x0000000800c0c400 (java.lang.invoke)
    invoke:-1, LambdaForm$MH/0x0000000800c0d000 (java.lang.invoke)
    invokeExact_MT:-1, Invokers$Holder (java.lang.invoke)
    invokeImpl:154, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:577, Method (java.lang.reflect)
    runTest:154, TestCase (junit.framework)
    runBare:127, TestCase (junit.framework)
    protect:106, TestResult$1 (junit.framework)
    runProtected:124, TestResult (junit.framework)
    run:109, TestResult (junit.framework)
    run:118, TestCase (junit.framework)
    doRun:116, TestRunner (junit.textui)
    doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
    doRun:109, TestRunner (junit.textui)
    startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

    ByteBuf ultimately calls UNSAFE.putByte(data, offset, value); to write the bytes.
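    A small sketch of that write path using Netty's public API (buffer size and payload are arbitrary; whether the unsafe putByte branch is taken depends on the buffer being direct and Unsafe being available):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.ByteBufUtil;
    import io.netty.buffer.Unpooled;

    public class WriteUtf8Demo {
        public static void main(String[] args) {
            ByteBuf buf = Unpooled.directBuffer(64);        // off-heap buffer
            ByteBufUtil.writeUtf8(buf, "SET demo value");   // may take the unsafeWriteUtf8 -> PlatformDependent.putByte path
            System.out.println(buf.readableBytes() + " bytes written");
            buf.release();                                   // direct buffers must be released explicitly
        }
    }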


    Writing the ByteBuf to the socket

    write:62, SocketDispatcher (sun.nio.ch)
    writeFromNativeBuffer:137, IOUtil (sun.nio.ch)
    write:81, IOUtil (sun.nio.ch)
    write:58, IOUtil (sun.nio.ch)
    write:532, SocketChannelImpl (sun.nio.ch)
    doWrite:415, NioSocketChannel (io.netty.channel.socket.nio)
    flush0:931, AbstractChannel$AbstractUnsafe (io.netty.channel)
    flush0:354, AbstractNioChannel$AbstractNioUnsafe (io.netty.channel.nio)
    flush:895, AbstractChannel$AbstractUnsafe (io.netty.channel)
    flush:1372, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeFlush0:921, AbstractChannelHandlerContext (io.netty.channel)
    invokeFlush:907, AbstractChannelHandlerContext (io.netty.channel)
    flush:893, AbstractChannelHandlerContext (io.netty.channel)
    flush:125, ChannelOutboundHandlerAdapter (io.netty.channel)
    invokeFlush0:925, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:941, AbstractChannelHandlerContext (io.netty.channel)
    write:966, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    write:83, CommandsQueue (org.redisson.client.handler)
    invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
    run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
    runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
    runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
    - Async stack trace
    addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
    safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
    write:972, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
    writeAndFlush:306, AbstractChannel (io.netty.channel)
    send:206, RedisConnection (org.redisson.client)
    sendCommand:590, RedisExecutor (org.redisson.command)
    lambda$execute$3:164, RedisExecutor (org.redisson.command)
    uniWhenComplete:863, CompletableFuture (java.util.concurrent)
    uniWhenCompleteStage:887, CompletableFuture (java.util.concurrent)
    whenComplete:2325, CompletableFuture (java.util.concurrent)
    execute:149, RedisExecutor (org.redisson.command)
    async:526, CommandAsyncService (org.redisson.command)
    readAsync:292, CommandAsyncService (org.redisson.command)
    getAsync:140, RedissonBucket (org.redisson)
    get:135, RedissonBucket (org.redisson)
    testApp:59, AppTest (com.demo.redission)
    invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:577, Method (java.lang.reflect)
    runTest:154, TestCase (junit.framework)
    runBare:127, TestCase (junit.framework)
    protect:106, TestResult$1 (junit.framework)
    runProtected:124, TestResult (junit.framework)
    run:109, TestResult (junit.framework)
    run:118, TestCase (junit.framework)
    doRun:116, TestRunner (junit.textui)
    doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
    doRun:109, TestRunner (junit.textui)
    startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)
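    At the bottom of the stack Netty's NioSocketChannel simply hands the ByteBuffers to a plain java.nio.channels.SocketChannel. A JDK-only sketch of that final step (host, port and payload are assumptions):

    import java.net.InetSocketAddress;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;
    import java.nio.charset.StandardCharsets;

    public class NioWriteDemo {
        public static void main(String[] args) throws Exception {
            try (SocketChannel ch = SocketChannel.open(new InetSocketAddress("127.0.0.1", 6379))) {
                // A RESP-encoded PING, just to have something valid to send to Redis
                ByteBuffer buf = ByteBuffer.wrap("*1\r\n$4\r\nPING\r\n".getBytes(StandardCharsets.US_ASCII));
                while (buf.hasRemaining()) {
                    ch.write(buf);   // same sun.nio.ch path as the bottom of the stack above
                }
            }
        }
    }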

    ByteBuf to ByteBuffer conversion

    public ByteBuffer[] nioBuffers(int maxCount, long maxBytes) {
        ...
        ByteBuffer[] nioBuffers = NIO_BUFFERS.get(threadLocalMap);
        Entry entry = flushedEntry;
        while (isFlushedEntry(entry) && entry.msg instanceof ByteBuf) {
            if (!entry.cancelled) {
                ByteBuf buf = (ByteBuf) entry.msg;
                final int readerIndex = buf.readerIndex();
                final int readableBytes = buf.writerIndex() - readerIndex;

                if (readableBytes > 0) {
                    nioBufferSize += readableBytes;
                    int count = entry.count;
                    if (count == -1) {
                        //noinspection ConstantValueVariableUse
                        entry.count = count = buf.nioBufferCount();
                    }
                    int neededSpace = min(maxCount, nioBufferCount + count);
                    if (neededSpace > nioBuffers.length) {
                        nioBuffers = expandNioBufferArray(nioBuffers, neededSpace, nioBufferCount);
                        NIO_BUFFERS.set(threadLocalMap, nioBuffers);
                    }
                    if (count == 1) {
                        ByteBuffer nioBuf = entry.buf;
                        if (nioBuf == null) {
                            // cache ByteBuffer as it may need to create a new ByteBuffer instance if its a
                            // derived buffer
                            entry.buf = nioBuf = buf.internalNioBuffer(readerIndex, readableBytes); // the ByteBuf -> ByteBuffer conversion
                        }
                        nioBuffers[nioBufferCount++] = nioBuf;
                    } else {
                        // The code exists in an extra method to ensure the method is not too big to inline as this
                        // branch is not very likely to get hit very frequently.
                        nioBufferCount = nioBuffers(entry, buf, nioBuffers, nioBufferCount, maxCount); // conversion into nioBuffers
                    }
                    if (nioBufferCount >= maxCount) {
                        break;
                    }
                }
            }
            entry = entry.next;
        }
        this.nioBufferCount = nioBufferCount;
        this.nioBufferSize = nioBufferSize;

        return nioBuffers;
    }

    During the write, the ByteBuf is converted into a ByteBuffer; the call that finally does it is PooledByteBuf.internalNioBuffer.
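    A small sketch of the same conversion using the public API (nioBuffer is the public counterpart of the internalNioBuffer call in the trace; allocator and sizes are arbitrary):

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.ByteBufUtil;
    import io.netty.buffer.PooledByteBufAllocator;
    import java.nio.ByteBuffer;

    public class NioBufferDemo {
        public static void main(String[] args) {
            ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(32);
            ByteBufUtil.writeUtf8(buf, "hello");
            // A ByteBuffer view over the same memory region, no copying involved.
            ByteBuffer nio = buf.nioBuffer(buf.readerIndex(), buf.readableBytes());
            System.out.println(nio.remaining() + " readable bytes");
            buf.release();
        }
    }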

    internalNioBuffer:158, PooledByteBuf (io.netty.buffer)
    _internalNioBuffer:194, PooledByteBuf (io.netty.buffer)
    internalNioBuffer:207, PooledByteBuf (io.netty.buffer)
    nioBuffers:447, ChannelOutboundBuffer (io.netty.channel)
    doWrite:399, NioSocketChannel (io.netty.channel.socket.nio)
    flush0:931, AbstractChannel$AbstractUnsafe (io.netty.channel)
    flush0:354, AbstractNioChannel$AbstractNioUnsafe (io.netty.channel.nio)
    flush:895, AbstractChannel$AbstractUnsafe (io.netty.channel)
    flush:1372, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeFlush0:921, AbstractChannelHandlerContext (io.netty.channel)
    invokeFlush:907, AbstractChannelHandlerContext (io.netty.channel)
    flush:893, AbstractChannelHandlerContext (io.netty.channel)
    flush:125, ChannelOutboundHandlerAdapter (io.netty.channel)
    invokeFlush0:925, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:941, AbstractChannelHandlerContext (io.netty.channel)
    write:966, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    write:83, CommandsQueue (org.redisson.client.handler)
    invokeWrite0:879, AbstractChannelHandlerContext (io.netty.channel)
    invokeWriteAndFlush:940, AbstractChannelHandlerContext (io.netty.channel)
    run:1247, AbstractChannelHandlerContext$WriteTask (io.netty.channel)
    runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
    runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
    - Async stack trace
    addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
    safeExecute:1165, AbstractChannelHandlerContext (io.netty.channel)
    write:972, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:934, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:984, AbstractChannelHandlerContext (io.netty.channel)
    writeAndFlush:1025, DefaultChannelPipeline (io.netty.channel)
    writeAndFlush:306, AbstractChannel (io.netty.channel)
    send:206, RedisConnection (org.redisson.client)
    sync:215, RedisConnection (org.redisson.client)
    sync:202, RedisConnection (org.redisson.client)
    <init>:100, ClusterConnectionManager (org.redisson.cluster)
    createConnectionManager:196, ConfigSupport (org.redisson.config)
    <init>:68, Redisson (org.redisson)
    create:109, Redisson (org.redisson)
    main:24, App (com.demo.redission)

    The connection-creation flow:

    createConnection:36, RedisConnectionHandler (org.redisson.client.handler)
    channelRegistered:53, BaseConnectionHandler (org.redisson.client.handler)
    invokeChannelRegistered:176, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRegistered:152, AbstractChannelHandlerContext (io.netty.channel) [2]
    fireChannelRegistered:145, AbstractChannelHandlerContext (io.netty.channel)
    channelRegistered:1383, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeChannelRegistered:172, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRegistered:152, AbstractChannelHandlerContext (io.netty.channel) [1]
    fireChannelRegistered:815, DefaultChannelPipeline (io.netty.channel)
    register0:517, AbstractChannel$AbstractUnsafe (io.netty.channel)
    access$200:429, AbstractChannel$AbstractUnsafe (io.netty.channel)
    run:486, AbstractChannel$AbstractUnsafe$1 (io.netty.channel)
    runTask$$$capture:174, AbstractEventExecutor (io.netty.util.concurrent)
    runTask:-1, AbstractEventExecutor (io.netty.util.concurrent)
    - Async stack trace
    addTask:-1, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:836, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute0:827, SingleThreadEventExecutor (io.netty.util.concurrent)
    execute:817, SingleThreadEventExecutor (io.netty.util.concurrent)
    register:483, AbstractChannel$AbstractUnsafe (io.netty.channel)
    register:89, SingleThreadEventLoop (io.netty.channel)
    register:83, SingleThreadEventLoop (io.netty.channel)
    register:86, MultithreadEventLoopGroup (io.netty.channel)
    initAndRegister:323, AbstractBootstrap (io.netty.bootstrap)
    doResolveAndConnect:155, Bootstrap (io.netty.bootstrap)
    connect:139, Bootstrap (io.netty.bootstrap)
    lambda$connectAsync$1:220, RedisClient (org.redisson.client)
    uniComposeStage:1187, CompletableFuture (java.util.concurrent)
    thenCompose:2309, CompletableFuture (java.util.concurrent)
    connectAsync:218, RedisClient (org.redisson.client)
    connect:189, ClientConnectionsEntry (org.redisson.connection)
    connect:249, ConnectionPool (org.redisson.connection.pool)
    createConnection:274, ConnectionPool (org.redisson.connection.pool)
    lambda$createConnection$1:112, ConnectionPool (org.redisson.connection.pool)
    uniAcceptNow:757, CompletableFuture (java.util.concurrent)
    uniAcceptStage:735, CompletableFuture (java.util.concurrent)
    thenAccept:2182, CompletableFuture (java.util.concurrent)
    createConnection:110, ConnectionPool (org.redisson.connection.pool)
    initConnections:92, ConnectionPool (org.redisson.connection.pool)
    add:69, ConnectionPool (org.redisson.connection.pool)
    add:34, MasterConnectionPool (org.redisson.connection.pool)
    lambda$setupMasterEntry$1:139, MasterSlaveEntry (org.redisson.connection)
    uniComposeStage:1187, CompletableFuture (java.util.concurrent)
    thenCompose:2309, CompletableFuture (java.util.concurrent)
    setupMasterEntry:122, MasterSlaveEntry (org.redisson.connection)
    setupMasterEntry:117, MasterSlaveEntry (org.redisson.connection)
    setupMasterEntry:112, MasterSlaveEntry (org.redisson.connection)
    initSingleEntry:330, MasterSlaveConnectionManager (org.redisson.connection)
    <init>:146, MasterSlaveConnectionManager (org.redisson.connection)
    <init>:30, SingleConnectionManager (org.redisson.connection)
    createConnectionManager:190, ConfigSupport (org.redisson.config)
    <init>:68, Redisson (org.redisson)
    create:109, Redisson (org.redisson)
    testApp:58, AppTest (com.demo.redission)
    invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:577, Method (java.lang.reflect)
    runTest:154, TestCase (junit.framework)
    runBare:127, TestCase (junit.framework)
    protect:106, TestResult$1 (junit.framework)
    runProtected:124, TestResult (junit.framework)
    run:109, TestResult (junit.framework)
    run:118, TestCase (junit.framework)
    doRun:116, TestRunner (junit.textui)
    doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
    doRun:109, TestRunner (junit.textui)
    startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

    Bootstrap usage:

    createBootstrap:122, RedisClient (org.redisson.client)
    <init>:115, RedisClient (org.redisson.client)
    create:77, RedisClient (org.redisson.client)
    createClient:425, MasterSlaveConnectionManager (org.redisson.connection)
    createClient:412, MasterSlaveConnectionManager (org.redisson.connection)
    setupMasterEntry:116, MasterSlaveEntry (org.redisson.connection)
    setupMasterEntry:112, MasterSlaveEntry (org.redisson.connection)
    initSingleEntry:330, MasterSlaveConnectionManager (org.redisson.connection)
    <init>:146, MasterSlaveConnectionManager (org.redisson.connection)
    <init>:30, SingleConnectionManager (org.redisson.connection)
    createConnectionManager:190, ConfigSupport (org.redisson.config)
    <init>:68, Redisson (org.redisson)
    create:109, Redisson (org.redisson)
    testApp:58, AppTest (com.demo.redission)
    invokeVirtual:-1, LambdaForm$DMH/0x0000000800c0c400 (java.lang.invoke)
    invoke:-1, LambdaForm$MH/0x0000000800c0d800 (java.lang.invoke)
    invokeExact_MT:-1, Invokers$Holder (java.lang.invoke)
    invokeImpl:154, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:104, DirectMethodHandleAccessor (jdk.internal.reflect)
    invoke:577, Method (java.lang.reflect)
    runTest:154, TestCase (junit.framework)
    runBare:127, TestCase (junit.framework)
    protect:106, TestResult$1 (junit.framework)
    runProtected:124, TestResult (junit.framework)
    run:109, TestResult (junit.framework)
    run:118, TestCase (junit.framework)
    runTest:208, TestSuite (junit.framework)
    run:203, TestSuite (junit.framework)
    doRun:116, TestRunner (junit.textui)
    doRun:117, JUnit3IdeaTestRunner (com.intellij.junit3)
    doRun:109, TestRunner (junit.textui)
    startRunnerWithArgs:38, JUnit3IdeaTestRunner (com.intellij.junit3)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)
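    RedisClient.createBootstrap is ordinary Netty client bootstrapping. A minimal sketch of the same pattern, with the Redisson-specific handlers left out (host and port are assumptions):

    import io.netty.bootstrap.Bootstrap;
    import io.netty.channel.ChannelInitializer;
    import io.netty.channel.ChannelOption;
    import io.netty.channel.nio.NioEventLoopGroup;
    import io.netty.channel.socket.SocketChannel;
    import io.netty.channel.socket.nio.NioSocketChannel;

    public class BootstrapDemo {
        public static void main(String[] args) throws Exception {
            NioEventLoopGroup group = new NioEventLoopGroup();
            try {
                Bootstrap b = new Bootstrap()
                        .group(group)
                        .channel(NioSocketChannel.class)
                        .option(ChannelOption.TCP_NODELAY, true)
                        .handler(new ChannelInitializer<SocketChannel>() {
                            @Override
                            protected void initChannel(SocketChannel ch) {
                                // Redisson installs its encoder/decoder handlers here
                            }
                        });
                b.connect("127.0.0.1", 6379).sync().channel().close();
            } finally {
                group.shutdownGracefully();
            }
        }
    }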

    Future usage:

    handleResult:547, RedisExecutor (org.redisson.command)
    checkAttemptPromise:524, RedisExecutor (org.redisson.command)
    lambda$execute$4:176, RedisExecutor (org.redisson.command)
    accept:-1, RedisExecutor$$Lambda$85/0x0000000800d64c28 (org.redisson.command)
    uniWhenComplete:863, CompletableFuture (java.util.concurrent)
    tryFire:841, CompletableFuture$UniWhenComplete (java.util.concurrent)
    postComplete:510, CompletableFuture (java.util.concurrent)
    complete:2147, CompletableFuture (java.util.concurrent)
    completeResponse:467, CommandDecoder (org.redisson.client.handler)
    handleResult:461, CommandDecoder (org.redisson.client.handler)
    decode:340, CommandDecoder (org.redisson.client.handler)
    decodeCommand:205, CommandDecoder (org.redisson.client.handler)
    decode:144, CommandDecoder (org.redisson.client.handler)
    decode:120, CommandDecoder (org.redisson.client.handler)
    decodeRemovalReentryProtection:529, ByteToMessageDecoder (io.netty.handler.codec)
    callDecode:366, ReplayingDecoder (io.netty.handler.codec)
    channelRead:290, ByteToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:444, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:412, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:1410, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeChannelRead:440, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:919, DefaultChannelPipeline (io.netty.channel)
    read:166, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
    processSelectedKey:788, NioEventLoop (io.netty.channel.nio)
    processSelectedKeysOptimized:724, NioEventLoop (io.netty.channel.nio)
    processSelectedKeys:650, NioEventLoop (io.netty.channel.nio)
    run:562, NioEventLoop (io.netty.channel.nio)
    run:997, SingleThreadEventExecutor$4 (io.netty.util.concurrent)
    run:74, ThreadExecutorMap$2 (io.netty.util.internal)
    run:30, FastThreadLocalRunnable (io.netty.util.concurrent)
    run:833, Thread (java.lang)
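    The event-loop thread completes the attempt promise, which fires the whenComplete callback registered earlier in RedisExecutor.execute. A JDK-only sketch of that hand-off (names are illustrative only):

    import java.util.concurrent.CompletableFuture;

    public class WhenCompleteDemo {
        public static void main(String[] args) {
            CompletableFuture<String> attemptPromise = new CompletableFuture<>();

            // Registered up front, like RedisExecutor.execute(...).whenComplete(...)
            attemptPromise.whenComplete((res, err) ->
                    System.out.println("callback on " + Thread.currentThread().getName() + ": " + res));

            // Later, the "decoder" side completes the promise and the callback runs.
            new Thread(() -> attemptPromise.complete("PONG"), "nioEventLoopGroup-x").start();
        }
    }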

    Redis decoding

    /home/dai/.m2/repository/org/redisson/redisson/3.19.0/redisson-3.19.0-sources.jar!/org/redisson/client/handler/RedisChannelInitializer.java
    @Override
    protected void initChannel(Channel ch) throws Exception {
        initSsl(config, ch);

        if (type == Type.PLAIN) {
            ch.pipeline().addLast(new RedisConnectionHandler(redisClient));
        } else {
            ch.pipeline().addLast(new RedisPubSubConnectionHandler(redisClient));
        }

        ch.pipeline().addLast(
            connectionWatchdog,
            CommandEncoder.INSTANCE,
            CommandBatchEncoder.INSTANCE);

        if (type == Type.PLAIN) {
            ch.pipeline().addLast(new CommandsQueue());
        } else {
            ch.pipeline().addLast(new CommandsQueuePubSub());
        }

        if (pingConnectionHandler != null) {
            ch.pipeline().addLast(pingConnectionHandler);
        }

        if (type == Type.PLAIN) {
            ch.pipeline().addLast(new CommandDecoder(config.getAddress().getScheme())); // the decoder
        } else {
            ch.pipeline().addLast(new CommandPubSubDecoder(config));
        }

        ch.pipeline().addLast(new ErrorsLoggingHandler());

        config.getNettyHook().afterChannelInitialization(ch);
    }


    Redisson future callback:

    handleResult:547, RedisExecutor (org.redisson.command)
    checkAttemptPromise:524, RedisExecutor (org.redisson.command)
    lambda$execute$4:176, RedisExecutor (org.redisson.command)
    accept:-1, RedisExecutor$$Lambda$86/0x0000000800d64c18 (org.redisson.command)
    uniWhenComplete:863, CompletableFuture (java.util.concurrent)
    tryFire:841, CompletableFuture$UniWhenComplete (java.util.concurrent)
    postComplete:510, CompletableFuture (java.util.concurrent)
    complete:2147, CompletableFuture (java.util.concurrent)
    completeResponse:467, CommandDecoder (org.redisson.client.handler)
    handleResult:461, CommandDecoder (org.redisson.client.handler)
    decode:392, CommandDecoder (org.redisson.client.handler)
    decodeCommand:205, CommandDecoder (org.redisson.client.handler)
    decode:144, CommandDecoder (org.redisson.client.handler)
    decode:120, CommandDecoder (org.redisson.client.handler)
    decodeRemovalReentryProtection:529, ByteToMessageDecoder (io.netty.handler.codec)
    callDecode:366, ReplayingDecoder (io.netty.handler.codec)
    channelRead:290, ByteToMessageDecoder (io.netty.handler.codec)
    invokeChannelRead:444, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:412, AbstractChannelHandlerContext (io.netty.channel)
    channelRead:1410, DefaultChannelPipeline$HeadContext (io.netty.channel)
    invokeChannelRead:440, AbstractChannelHandlerContext (io.netty.channel)
    invokeChannelRead:420, AbstractChannelHandlerContext (io.netty.channel)
    fireChannelRead:919, DefaultChannelPipeline (io.netty.channel)
    read:166, AbstractNioByteChannel$NioByteUnsafe (io.netty.channel.nio)
    processSelectedKey:788, NioEventLoop (io.netty.channel.nio)
    processSelectedKeysOptimized:724, NioEventLoop (io.netty.channel.nio)
    processSelectedKeys:650, NioEventLoop (io.netty.channel.nio)
    run:562, NioEventLoop (io.netty.channel.nio)
    run:997, SingleThreadEventExecutor$4 (io.netty.util.concurrent)
    run:74, ThreadExecutorMap$2 (io.netty.util.internal)
    run:30, FastThreadLocalRunnable (io.netty.util.concurrent)
    run:833, Thread (java.lang)

    Background

    Goal: understand how Maven dependency scopes are used.

    • compile: the default scope; available at runtime and packaged into the artifact
    • provided: needed at compile time; the runtime environment provides it, so it is not packaged
    • runtime: not needed for compilation but required at runtime, and packaged into the artifact (keeps interface and implementation separate, e.g. JDBC drivers)
    • test: needed only for tests; not packaged
    • system: the jar is taken from a path on the local system instead of a repository (rarely used)

    Usage

    Background

    Goal: understand the JVM's CMS GC algorithm.


    Source path

    src\hotspot\share\gc\serial\defNewGeneration.cpp
    // Ignores "ref" and calls allocate().
    oop Generation::promote(oop obj, size_t obj_size) {
      assert(obj_size == obj->size(), "bad obj_size passed in");

    #ifndef PRODUCT
      if (GenCollectedHeap::heap()->promotion_should_fail()) {
        return NULL;
      }
    #endif  // #ifndef PRODUCT

      HeapWord* result = allocate(obj_size, false);
      if (result != NULL) {
        Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
        return cast_to_oop(result);
      } else {
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        return gch->handle_failed_promotion(this, obj, obj_size);
      }
    }

    • View the default JVM options:
    $ java -XX:+PrintCommandLineFlags -version 
    -XX:ConcGCThreads=3 -XX:G1ConcRefinementThreads=10 -XX:GCDrainStackTargetSize=64 -XX:InitialHeapSize=525168320 -XX:MarkStackSize=4194304 -XX:MaxHeapSize=8402693120 -XX:MinHeapSize=6815736 -XX:+PrintCommandLineFlags -XX:ReservedCodeCacheSize=251658240 -XX:+SegmentedCodeCache -XX:+UseCompressedClassPointers -XX:+UseCompressedOops -XX:+UseG1GC
    openjdk version "17.0.5" 2022-10-18
    OpenJDK Runtime Environment (build 17.0.5+8-Ubuntu-2ubuntu122.04)
    OpenJDK 64-Bit Server VM (build 17.0.5+8-Ubuntu-2ubuntu122.04, mixed mode, sharing)

    Java IO

    Java IO is split into byte streams and character streams: byte streams are InputStream/OutputStream, and character streams are Reader/Writer (a minimal example follows the table below).


    Overview:

    Class          Method   Reads/writes
    InputStream    read     bytes
    OutputStream   write    bytes
    Reader         read     chars
    Writer         write    chars
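    A minimal sketch of the difference, using InputStreamReader as the byte-to-char bridge (the payload is arbitrary):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class IoDemo {
        public static void main(String[] args) throws Exception {
            // Byte stream: write raw bytes
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            bytes.write("你好".getBytes(StandardCharsets.UTF_8));

            // Character stream: Reader works on chars; InputStreamReader decodes bytes to chars with a charset
            try (InputStreamReader reader = new InputStreamReader(
                    new ByteArrayInputStream(bytes.toByteArray()), StandardCharsets.UTF_8)) {
                char[] chars = new char[2];
                int n = reader.read(chars);
                System.out.println(new String(chars, 0, n)); // prints 你好
            }
        }
    }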


    Background

    Goal: understand how arraycopy is implemented, and whether it performs a shallow or a deep copy.


    arrays

    Before looking at arraycopy, first look at arrays.


    The JLS describes this in detail:

    In the Java programming language, arrays are objects (§4.3.1), are dynamically created
    An array object contains a number of variables. The number of variables may be zero, in which case the array is said to be empty. The variables contained in an array have no names; instead they are referenced by array access expressions that use non-negative integer index values. These variables are called the components of the array. If an array has n components, we say n is the length of the array; the components of the array are referenced using integer indices from 0 to n - 1, inclusive.

    arrays:
    • Type: Object
    • An array object holds variables (its components)

    Variables

    A variable is a storage location and has an associated type, sometimes called its compile-time type, that is either a primitive type (§4.2) or a reference type (§4.3).

    A variable's value is changed by an assignment (§15.26) or by a prefix or postfix ++ (increment) or -- (decrement) operator (§15.14.2, §15.14.3, §15.15.1, §15.15.2).

    A variable consists of two parts:

    • type
      • primitive type
      • reference type
    • value
      • primitive value
      • reference value
    4.3.1 Objects
    An object is a class instance or an array.
    The reference values (often just references) are pointers to these objects, and a
    special null reference, which refers to no object.

    JNI implementation

    src\hotspot\share\oops\objArrayKlass.cpp

    void ObjArrayKlass::copy_array(arrayOop s, int src_pos, arrayOop d,
                                   int dst_pos, int length, TRAPS) {
      assert(s->is_objArray(), "must be obj array");

      if (!d->is_objArray()) {
        ResourceMark rm(THREAD);
        stringStream ss;
        if (d->is_typeArray()) {
          ss.print("arraycopy: type mismatch: can not copy object array[] into %s[]",
                   type2name_tab[ArrayKlass::cast(d->klass())->element_type()]);
        } else {
          ss.print("arraycopy: destination type %s is not an array", d->klass()->external_name());
        }
        THROW_MSG(vmSymbols::java_lang_ArrayStoreException(), ss.as_string());
      }

      // Check is all offsets and lengths are non negative
      if (src_pos < 0 || dst_pos < 0 || length < 0) {
        // Pass specific exception reason.
        ResourceMark rm(THREAD);
        stringStream ss;
        if (src_pos < 0) {
          ss.print("arraycopy: source index %d out of bounds for object array[%d]",
                   src_pos, s->length());
        } else if (dst_pos < 0) {
          ss.print("arraycopy: destination index %d out of bounds for object array[%d]",
                   dst_pos, d->length());
        } else {
          ss.print("arraycopy: length %d is negative", length);
        }
        THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
      }
      // Check if the ranges are valid
      if ((((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) ||
          (((unsigned int) length + (unsigned int) dst_pos) > (unsigned int) d->length())) {
        // Pass specific exception reason.
        ResourceMark rm(THREAD);
        stringStream ss;
        if (((unsigned int) length + (unsigned int) src_pos) > (unsigned int) s->length()) {
          ss.print("arraycopy: last source index %u out of bounds for object array[%d]",
                   (unsigned int) length + (unsigned int) src_pos, s->length());
        } else {
          ss.print("arraycopy: last destination index %u out of bounds for object array[%d]",
                   (unsigned int) length + (unsigned int) dst_pos, d->length());
        }
        THROW_MSG(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), ss.as_string());
      }

      // Special case. Boundary cases must be checked first
      // This allows the following call: copy_array(s, s.length(), d.length(), 0).
      // This is correct, since the position is supposed to be an 'in between point', i.e., s.length(),
      // points to the right of the last element.
      if (length==0) {
        return;
      }
      if (UseCompressedOops) {
        size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(src_pos);
        size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<narrowOop>(dst_pos);
        assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(s, src_offset, NULL) ==
               objArrayOop(s)->obj_at_addr<narrowOop>(src_pos), "sanity");
        assert(arrayOopDesc::obj_offset_to_raw<narrowOop>(d, dst_offset, NULL) ==
               objArrayOop(d)->obj_at_addr<narrowOop>(dst_pos), "sanity");
        do_copy(s, src_offset, d, dst_offset, length, CHECK);
      } else {
        size_t src_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(src_pos);
        size_t dst_offset = (size_t) objArrayOopDesc::obj_at_offset<oop>(dst_pos);
        assert(arrayOopDesc::obj_offset_to_raw<oop>(s, src_offset, NULL) ==
               objArrayOop(s)->obj_at_addr<oop>(src_pos), "sanity");
        assert(arrayOopDesc::obj_offset_to_raw<oop>(d, dst_offset, NULL) ==
               objArrayOop(d)->obj_at_addr<oop>(dst_pos), "sanity");
        do_copy(s, src_offset, d, dst_offset, length, CHECK);
      }
    }
    (gdb) bt
    #0 Copy::conjoint_oops_atomic (count=6, to=0x62a434ee8, from=0x62a42f220) at /home/dai/jdk/src/hotspot/share/utilities/copy.hpp:164
    #1 AccessInternal::arraycopy_conjoint_oops (src=0x62a42f220, dst=0x62a434ee8, length=6) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.cpp:94
    #2 0x00007ffff7067317 in RawAccessBarrierArrayCopy::arraycopy<18112614ul, narrowOop> (length=6, dst_raw=<optimized out>, dst_offset_in_bytes=0, dst_obj=..., src_raw=0x62a42f220, src_offset_in_bytes=0,
    src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:270
    #3 RawAccessBarrier<18112614ul>::arraycopy<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=0, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=0, src_obj=...)
    at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:344
    #4 RawAccessBarrier<18112614ul>::oop_arraycopy<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=0, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=0, src_obj=...)
    at /home/dai/jdk/src/hotspot/share/oops/accessBackend.inline.hpp:128
    #5 ModRefBarrierSet::AccessBarrier<18112614ul, G1BarrierSet>::oop_arraycopy_in_heap<narrowOop> (length=6, dst_raw=0x62a434ee8, dst_offset_in_bytes=<optimized out>, dst_obj=...,
    src_raw=<optimized out>, src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp:108
    #6 AccessInternal::PostRuntimeDispatch<G1BarrierSet::AccessBarrier<18112614ul, G1BarrierSet>, (AccessInternal::BarrierType)8, 18112614ul>::oop_access_barrier<HeapWordImpl*> (src_obj=...,
    src_offset_in_bytes=<optimized out>, src_raw=<optimized out>, dst_obj=..., dst_offset_in_bytes=<optimized out>, dst_raw=<optimized out>, length=6)
    at /home/dai/jdk/src/hotspot/share/oops/access.inline.hpp:142
    #7 0x00007ffff7063f43 in AccessInternal::RuntimeDispatch<18112582ul, HeapWordImpl*, (AccessInternal::BarrierType)8>::arraycopy_init (src_obj=..., src_offset_in_bytes=16, src_raw=0x0, dst_obj=...,
    dst_offset_in_bytes=<optimized out>, dst_raw=<optimized out>, length=<optimized out>) at /home/dai/jdk/src/hotspot/share/oops/access.inline.hpp:339
    #8 0x00007ffff7061a0e in AccessInternal::RuntimeDispatch<18112582ul, HeapWordImpl*, (AccessInternal::BarrierType)8>::arraycopy (length=<optimized out>, dst_raw=<optimized out>,
    dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>, src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:554
    #9 AccessInternal::PreRuntimeDispatch::arraycopy<18112582ul, HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
    src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:907
    #10 AccessInternal::arraycopy_reduce_types<18112580ul> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
    src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:1054
    #11 AccessInternal::arraycopy<18087940ul, HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
    src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/accessBackend.hpp:1208
    #12 Access<18087936ul>::oop_arraycopy<HeapWordImpl*> (length=<optimized out>, dst_raw=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_raw=<optimized out>,
    src_offset_in_bytes=<optimized out>, src_obj=...) at /home/dai/jdk/src/hotspot/share/oops/access.hpp:137
    #13 ArrayAccess<16777216ul>::oop_arraycopy (length=<optimized out>, dst_offset_in_bytes=<optimized out>, dst_obj=..., src_offset_in_bytes=<optimized out>, src_obj=...)
    at /home/dai/jdk/src/hotspot/share/oops/access.hpp:323
    #14 ObjArrayKlass::do_copy (this=this@entry=0x800058a00, s=..., src_offset=src_offset@entry=16, d=..., dst_offset=dst_offset@entry=16, length=length@entry=6, __the_thread__=0x7ffff0028f20)
    at /home/dai/jdk/src/hotspot/share/oops/objArrayKlass.cpp:213
    #15 0x00007ffff7062e33 in ObjArrayKlass::copy_array (this=0x800058a00, s=..., src_pos=<optimized out>, d=..., dst_pos=<optimized out>, length=6, __the_thread__=0x7ffff0028f20)
    at /home/dai/jdk/src/hotspot/share/oops/oopsHierarchy.hpp:85
    #16 0x00007ffff6b2d75f in JVM_ArrayCopy (env=<optimized out>, ignored=<optimized out>, src=<optimized out>, src_pos=0, dst=<optimized out>, dst_pos=0, length=6)
    at /home/dai/jdk/src/hotspot/share/prims/jvm.cpp:298


    The corresponding x86 assembly is in src/hotspot/os_cpu/linux_x86/linux_x86_64.S:

            # Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
    # jlong* to,
    # size_t count)
    # Equivalent to
    # conjoint_jlongs_atomic
    # arrayof_conjoint_oops
    # conjoint_oops_atomic
    #
    # rdi - from
    # rsi - to
    # rdx - count, treated as ssize_t
    #
    .p2align 4,,15
    .type _Copy_arrayof_conjoint_jlongs,@function
    .type _Copy_conjoint_jlongs_atomic,@function
    _Copy_arrayof_conjoint_jlongs:
    _Copy_conjoint_jlongs_atomic:
    cmpq %rdi,%rsi
    leaq -8(%rdi,%rdx,8),%rax # from + count*8 - 8
    jbe acl_CopyRight
    cmpq %rax,%rsi
    jbe acl_CopyLeft
    acl_CopyRight:
    leaq -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
    negq %rdx
    jmp 3f
    1: movq 8(%rax,%rdx,8),%rsi
    movq %rsi,8(%rcx,%rdx,8)
    addq $1,%rdx
    jnz 1b
    ret
    .p2align 4,,15
    2: movq -24(%rax,%rdx,8),%rsi
    movq %rsi,-24(%rcx,%rdx,8)
    movq -16(%rax,%rdx,8),%rsi
    movq %rsi,-16(%rcx,%rdx,8)
    movq -8(%rax,%rdx,8),%rsi
    movq %rsi,-8(%rcx,%rdx,8)
    movq (%rax,%rdx,8),%rsi
    movq %rsi,(%rcx,%rdx,8)
    3: addq $4,%rdx
    jle 2b
    subq $4,%rdx
    jl 1b
    ret
    4: movq -8(%rdi,%rdx,8),%rcx
    movq %rcx,-8(%rsi,%rdx,8)
    subq $1,%rdx
    jnz 4b
    ret
    .p2align 4,,15
    5: movq 24(%rdi,%rdx,8),%rcx
    movq %rcx,24(%rsi,%rdx,8)
    movq 16(%rdi,%rdx,8),%rcx
    movq %rcx,16(%rsi,%rdx,8)
    movq 8(%rdi,%rdx,8),%rcx
    movq %rcx,8(%rsi,%rdx,8)
    movq (%rdi,%rdx,8),%rcx
    movq %rcx,(%rsi,%rdx,8)
    acl_CopyLeft:
    subq $4,%rdx
    jge 5b
    addq $4,%rdx
    jg 4b
    ret

    The conditional jumps in this file (jne, jbe, and so on) are encoded as offsets relative to the section; let's assemble it and look at the generated machine code.


    Here as assembles the assembly source into machine code:

    $ as src/hotspot/os_cpu/linux_x86/linux_x86_64.S
    $ objdump -S a.out

    a.out: file format elf64-x86-64


    Disassembly of section .text:

    0000000000000000 <SpinPause>:
    0: f3 90 pause
    2: 48 c7 c0 01 00 00 00 mov $0x1,%rax
    9: c3 ret
    a: 66 0f 1f 44 00 00 nopw 0x0(%rax,%rax,1)

    0000000000000010 <_Copy_arrayof_conjoint_bytes>:
    10: 49 89 d0 mov %rdx,%r8
    13: 48 c1 ea 03 shr $0x3,%rdx
    17: 48 39 fe cmp %rdi,%rsi
    1a: 4a 8d 44 07 ff lea -0x1(%rdi,%r8,1),%rax
    1f: 76 09 jbe 2a <acb_CopyRight>
    21: 48 39 c6 cmp %rax,%rsi
    24: 0f 86 9e 00 00 00 jbe c8 <acb_CopyLeft>

    000000000000002a <acb_CopyRight>:
    2a: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
    2f: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
    34: 48 f7 da neg %rdx
    37: eb 7d jmp b6 <acb_CopyRight+0x8c>
    39: 0f 1f 80 00 00 00 00 nopl 0x0(%rax)
    40: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
    45: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
    4a: 48 83 c2 01 add $0x1,%rdx
    4e: 75 f0 jne 40 <acb_CopyRight+0x16>
    50: 49 f7 c0 04 00 00 00 test $0x4,%r8
    57: 74 0e je 67 <acb_CopyRight+0x3d>
    59: 8b 70 08 mov 0x8(%rax),%esi
    5c: 89 71 08 mov %esi,0x8(%rcx)
    5f: 48 83 c0 04 add $0x4,%rax
    63: 48 83 c1 04 add $0x4,%rcx
    67: 49 f7 c0 02 00 00 00 test $0x2,%r8
    6e: 74 0c je 7c <acb_CopyRight+0x52>
    70: 66 8b 70 08 mov 0x8(%rax),%si
    74: 66 89 71 08 mov %si,0x8(%rcx)
    78: 48 83 c1 02 add $0x2,%rcx
    7c: 49 f7 c0 01 00 00 00 test $0x1,%r8
    83: 74 08 je 8d <acb_CopyRight+0x63>
    85: 42 8a 44 07 ff mov -0x1(%rdi,%r8,1),%al
    8a: 88 41 08 mov %al,0x8(%rcx)
    8d: c3 ret
    8e: 66 90 xchg %ax,%ax
    90: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
    95: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
    9a: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
    9f: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
    a4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
    a9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
    ae: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
    b2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
    b6: 48 83 c2 04 add $0x4,%rdx
    ba: 7e d4 jle 90 <acb_CopyRight+0x66>
    bc: 48 83 ea 04 sub $0x4,%rdx
    c0: 0f 8c 7a ff ff ff jl 40 <acb_CopyRight+0x16>
    c6: eb 88 jmp 50 <acb_CopyRight+0x26>

    00000000000000c8 <acb_CopyLeft>:
    c8: 49 f7 c0 01 00 00 00 test $0x1,%r8
    cf: 74 0e je df <acb_CopyLeft+0x17>
    d1: 42 8a 4c 07 ff mov -0x1(%rdi,%r8,1),%cl
    d6: 42 88 4c 06 ff mov %cl,-0x1(%rsi,%r8,1)
    db: 49 83 e8 01 sub $0x1,%r8
    df: 49 f7 c0 02 00 00 00 test $0x2,%r8
    e6: 74 0c je f4 <acb_CopyLeft+0x2c>
    e8: 66 42 8b 4c 07 fe mov -0x2(%rdi,%r8,1),%cx
    ee: 66 42 89 4c 06 fe mov %cx,-0x2(%rsi,%r8,1)
    f4: 49 f7 c0 04 00 00 00 test $0x4,%r8
    fb: 74 59 je 156 <acb_CopyLeft+0x8e>
    fd: 8b 0c d7 mov (%rdi,%rdx,8),%ecx
    100: 89 0c d6 mov %ecx,(%rsi,%rdx,8)
    103: eb 51 jmp 156 <acb_CopyLeft+0x8e>
    105: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    10c: 00 00 00 00
    110: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
    115: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
    11a: 48 83 ea 01 sub $0x1,%rdx
    11e: 75 f0 jne 110 <acb_CopyLeft+0x48>
    120: c3 ret
    121: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    128: 00 00 00 00
    12c: 0f 1f 40 00 nopl 0x0(%rax)
    130: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
    135: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
    13a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
    13f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
    144: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
    149: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
    14e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
    152: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
    156: 48 83 ea 04 sub $0x4,%rdx
    15a: 7d d4 jge 130 <acb_CopyLeft+0x68>
    15c: 48 83 c2 04 add $0x4,%rdx
    160: 7f ae jg 110 <acb_CopyLeft+0x48>
    162: c3 ret
    163: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    16a: 00 00 00 00
    16e: 66 90 xchg %ax,%ax

    0000000000000170 <_Copy_arrayof_conjoint_jshorts>:
    170: 49 89 d0 mov %rdx,%r8
    173: 48 c1 ea 02 shr $0x2,%rdx
    177: 48 39 fe cmp %rdi,%rsi
    17a: 4a 8d 44 47 fe lea -0x2(%rdi,%r8,2),%rax
    17f: 76 05 jbe 186 <acs_CopyRight>
    181: 48 39 c6 cmp %rax,%rsi
    184: 76 7e jbe 204 <acs_CopyLeft>

    0000000000000186 <acs_CopyRight>:
    186: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
    18b: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
    190: 48 f7 da neg %rdx
    193: eb 61 jmp 1f6 <acs_CopyRight+0x70>
    195: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
    19a: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
    19f: 48 83 c2 01 add $0x1,%rdx
    1a3: 75 f0 jne 195 <acs_CopyRight+0xf>
    1a5: 49 f7 c0 02 00 00 00 test $0x2,%r8
    1ac: 74 0a je 1b8 <acs_CopyRight+0x32>
    1ae: 8b 70 08 mov 0x8(%rax),%esi
    1b1: 89 71 08 mov %esi,0x8(%rcx)
    1b4: 48 83 c1 04 add $0x4,%rcx
    1b8: 49 f7 c0 01 00 00 00 test $0x1,%r8
    1bf: 74 0a je 1cb <acs_CopyRight+0x45>
    1c1: 66 42 8b 74 47 fe mov -0x2(%rdi,%r8,2),%si
    1c7: 66 89 71 08 mov %si,0x8(%rcx)
    1cb: c3 ret
    1cc: 0f 1f 40 00 nopl 0x0(%rax)
    1d0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
    1d5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
    1da: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
    1df: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
    1e4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
    1e9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
    1ee: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
    1f2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
    1f6: 48 83 c2 04 add $0x4,%rdx
    1fa: 7e d4 jle 1d0 <acs_CopyRight+0x4a>
    1fc: 48 83 ea 04 sub $0x4,%rdx
    200: 7c 93 jl 195 <acs_CopyRight+0xf>
    202: eb a1 jmp 1a5 <acs_CopyRight+0x1f>

    0000000000000204 <acs_CopyLeft>:
    204: 49 f7 c0 01 00 00 00 test $0x1,%r8
    20b: 74 0c je 219 <acs_CopyLeft+0x15>
    20d: 66 42 8b 4c 47 fe mov -0x2(%rdi,%r8,2),%cx
    213: 66 42 89 4c 46 fe mov %cx,-0x2(%rsi,%r8,2)
    219: 49 f7 c0 02 00 00 00 test $0x2,%r8
    220: 74 44 je 266 <acs_CopyLeft+0x62>
    222: 8b 0c d7 mov (%rdi,%rdx,8),%ecx
    225: 89 0c d6 mov %ecx,(%rsi,%rdx,8)
    228: eb 3c jmp 266 <acs_CopyLeft+0x62>
    22a: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
    22f: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
    234: 48 83 ea 01 sub $0x1,%rdx
    238: 75 f0 jne 22a <acs_CopyLeft+0x26>
    23a: c3 ret
    23b: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
    240: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
    245: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
    24a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
    24f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
    254: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
    259: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
    25e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
    262: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
    266: 48 83 ea 04 sub $0x4,%rdx
    26a: 7d d4 jge 240 <acs_CopyLeft+0x3c>
    26c: 48 83 c2 04 add $0x4,%rdx
    270: 7f b8 jg 22a <acs_CopyLeft+0x26>
    272: c3 ret
    273: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    27a: 00 00 00 00
    27e: 66 90 xchg %ax,%ax

    0000000000000280 <_Copy_arrayof_conjoint_jints>:
    280: 49 89 d0 mov %rdx,%r8
    283: 48 d1 ea shr %rdx
    286: 48 39 fe cmp %rdi,%rsi
    289: 4a 8d 44 87 fc lea -0x4(%rdi,%r8,4),%rax
    28e: 76 05 jbe 295 <aci_CopyRight>
    290: 48 39 c6 cmp %rax,%rsi
    293: 76 6f jbe 304 <aci_CopyLeft>

    0000000000000295 <aci_CopyRight>:
    295: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
    29a: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
    29f: 48 f7 da neg %rdx
    2a2: eb 52 jmp 2f6 <aci_CopyRight+0x61>
    2a4: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    2ab: 00 00 00 00
    2af: 90 nop
    2b0: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
    2b5: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
    2ba: 48 83 c2 01 add $0x1,%rdx
    2be: 75 f0 jne 2b0 <aci_CopyRight+0x1b>
    2c0: 49 f7 c0 01 00 00 00 test $0x1,%r8
    2c7: 74 06 je 2cf <aci_CopyRight+0x3a>
    2c9: 8b 70 08 mov 0x8(%rax),%esi
    2cc: 89 71 08 mov %esi,0x8(%rcx)
    2cf: c3 ret
    2d0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
    2d5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
    2da: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
    2df: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
    2e4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
    2e9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
    2ee: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
    2f2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
    2f6: 48 83 c2 04 add $0x4,%rdx
    2fa: 7e d4 jle 2d0 <aci_CopyRight+0x3b>
    2fc: 48 83 ea 04 sub $0x4,%rdx
    300: 7c ae jl 2b0 <aci_CopyRight+0x1b>
    302: eb bc jmp 2c0 <aci_CopyRight+0x2b>

    0000000000000304 <aci_CopyLeft>:
    304: 49 f7 c0 01 00 00 00 test $0x1,%r8
    30b: 74 49 je 356 <aci_CopyLeft+0x52>
    30d: 42 8b 4c 87 fc mov -0x4(%rdi,%r8,4),%ecx
    312: 42 89 4c 86 fc mov %ecx,-0x4(%rsi,%r8,4)
    317: eb 3d jmp 356 <aci_CopyLeft+0x52>
    319: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
    31e: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
    323: 48 83 ea 01 sub $0x1,%rdx
    327: 75 f0 jne 319 <aci_CopyLeft+0x15>
    329: c3 ret
    32a: 66 0f 1f 44 00 00 nopw 0x0(%rax,%rax,1)
    330: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
    335: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
    33a: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
    33f: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
    344: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
    349: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
    34e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
    352: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)
    356: 48 83 ea 04 sub $0x4,%rdx
    35a: 7d d4 jge 330 <aci_CopyLeft+0x2c>
    35c: 48 83 c2 04 add $0x4,%rdx
    360: 7f b7 jg 319 <aci_CopyLeft+0x15>
    362: c3 ret
    363: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    36a: 00 00 00 00
    36e: 66 90 xchg %ax,%ax

    0000000000000370 <_Copy_arrayof_conjoint_jlongs>:
    370: 48 39 fe cmp %rdi,%rsi
    373: 48 8d 44 d7 f8 lea -0x8(%rdi,%rdx,8),%rax
    378: 76 09 jbe 383 <acl_CopyRight>
    37a: 48 39 c6 cmp %rax,%rsi
    37d: 0f 86 93 00 00 00 jbe 416 <acl_CopyLeft>

    0000000000000383 <acl_CopyRight>:
    383: 48 8d 4c d6 f8 lea -0x8(%rsi,%rdx,8),%rcx
    388: 48 f7 da neg %rdx
    38b: eb 39 jmp 3c6 <acl_CopyRight+0x43>
    38d: 48 8b 74 d0 08 mov 0x8(%rax,%rdx,8),%rsi
    392: 48 89 74 d1 08 mov %rsi,0x8(%rcx,%rdx,8)
    397: 48 83 c2 01 add $0x1,%rdx
    39b: 75 f0 jne 38d <acl_CopyRight+0xa>
    39d: c3 ret
    39e: 66 90 xchg %ax,%ax
    3a0: 48 8b 74 d0 e8 mov -0x18(%rax,%rdx,8),%rsi
    3a5: 48 89 74 d1 e8 mov %rsi,-0x18(%rcx,%rdx,8)
    3aa: 48 8b 74 d0 f0 mov -0x10(%rax,%rdx,8),%rsi
    3af: 48 89 74 d1 f0 mov %rsi,-0x10(%rcx,%rdx,8)
    3b4: 48 8b 74 d0 f8 mov -0x8(%rax,%rdx,8),%rsi
    3b9: 48 89 74 d1 f8 mov %rsi,-0x8(%rcx,%rdx,8)
    3be: 48 8b 34 d0 mov (%rax,%rdx,8),%rsi
    3c2: 48 89 34 d1 mov %rsi,(%rcx,%rdx,8)
    3c6: 48 83 c2 04 add $0x4,%rdx
    3ca: 7e d4 jle 3a0 <acl_CopyRight+0x1d>
    3cc: 48 83 ea 04 sub $0x4,%rdx
    3d0: 7c bb jl 38d <acl_CopyRight+0xa>
    3d2: c3 ret
    3d3: 48 8b 4c d7 f8 mov -0x8(%rdi,%rdx,8),%rcx
    3d8: 48 89 4c d6 f8 mov %rcx,-0x8(%rsi,%rdx,8)
    3dd: 48 83 ea 01 sub $0x1,%rdx
    3e1: 75 f0 jne 3d3 <acl_CopyRight+0x50>
    3e3: c3 ret
    3e4: 66 66 2e 0f 1f 84 00 data16 cs nopw 0x0(%rax,%rax,1)
    3eb: 00 00 00 00
    3ef: 90 nop
    3f0: 48 8b 4c d7 18 mov 0x18(%rdi,%rdx,8),%rcx
    3f5: 48 89 4c d6 18 mov %rcx,0x18(%rsi,%rdx,8)
    3fa: 48 8b 4c d7 10 mov 0x10(%rdi,%rdx,8),%rcx
    3ff: 48 89 4c d6 10 mov %rcx,0x10(%rsi,%rdx,8)
    404: 48 8b 4c d7 08 mov 0x8(%rdi,%rdx,8),%rcx
    409: 48 89 4c d6 08 mov %rcx,0x8(%rsi,%rdx,8)
    40e: 48 8b 0c d7 mov (%rdi,%rdx,8),%rcx
    412: 48 89 0c d6 mov %rcx,(%rsi,%rdx,8)

    0000000000000416 <acl_CopyLeft>:
    416: 48 83 ea 04 sub $0x4,%rdx
    41a: 7d d4 jge 3f0 <acl_CopyRight+0x6d>
    41c: 48 83 c2 04 add $0x4,%rdx
    420: 7f b1 jg 3d3 <acl_CopyRight+0x50>
    422: c3 ret

    Summary

    An array holds a set of variables, and a variable's value is either a primitive value or a reference value.


    So arraycopy copies the variables, that is, it copies references: a shallow copy.

    Copies an array from the specified source array, beginning at the specified position, to the specified position of the destination array. A subsequence of array components are copied from the source array referenced by src to the destination array referenced by dest. The number of components copied is equal to the length argument. The components at positions srcPos through srcPos+length-1 in the source array are copied into positions destPos through destPos+length-1, respectively, of the destination array.
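    A minimal sketch that makes the shallow-copy behaviour visible (class and values are arbitrary):

    public class ArrayCopyDemo {
        public static void main(String[] args) {
            StringBuilder[] src = { new StringBuilder("a"), new StringBuilder("b") };
            StringBuilder[] dst = new StringBuilder[src.length];
            System.arraycopy(src, 0, dst, 0, src.length);

            src[0].append("!");           // mutate the object behind the copied reference
            System.out.println(dst[0]);   // prints "a!" because only the references were copied
        }
    }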


    Background

    Goal: understand how Spring Boot loads its configuration, in particular how spring-boot reads the RabbitMQ settings.

    +
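
As a minimal sketch of the binding mechanism the stack traces below walk through (the class name and the demo.mq prefix are illustrative, not Spring Boot's own code; spring.rabbitmq.* is bound onto RabbitProperties in the same way):

import org.springframework.boot.context.properties.ConfigurationProperties;

// Values such as demo.mq.host / demo.mq.port from application.yml or application.properties
// are bound onto this bean once it is registered (e.g. via @EnableConfigurationProperties).
@ConfigurationProperties(prefix = "demo.mq")
public class DemoMqProperties {

    private String host = "localhost"; // defaults used when the property is absent
    private int port = 5672;

    public String getHost() { return host; }
    public void setHost(String host) { this.host = host; }

    public int getPort() { return port; }
    public void setPort(int port) { this.port = port; }
}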

Stack traces

Where Spring Boot applies the configuration:

    configure:40, SimpleRabbitListenerContainerFactoryConfigurer (org.springframework.boot.autoconfigure.amqp)
    simpleRabbitListenerContainerFactory:81, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 1555928242 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Setting the properties:

    configure:40, SimpleRabbitListenerContainerFactoryConfigurer (org.springframework.boot.autoconfigure.amqp)
    simpleRabbitListenerContainerFactory:81, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
    invoke0:-1, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 622043416 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Injecting the RabbitMQ configuration:

    <init>:57, RabbitAnnotationDrivenConfiguration (org.springframework.boot.autoconfigure.amqp)
    newInstance0:-1, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:490, Constructor (java.lang.reflect)
    instantiateClass:204, BeanUtils (org.springframework.beans)
    instantiate:117, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiate:310, ConstructorResolver (org.springframework.beans.factory.support)
    autowireConstructor:295, ConstructorResolver (org.springframework.beans.factory.support)
    autowireConstructor:1361, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1208, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 809260538 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Initializing the RabbitProperties configuration:

    <init>:47, RabbitProperties (org.springframework.boot.autoconfigure.amqp)
    newInstance0:-1, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:490, Constructor (java.lang.reflect)
    instantiateClass:204, BeanUtils (org.springframework.beans)
    instantiate:87, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiateBean:1315, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1218, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
    createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
    createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
    postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
    populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
    postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
    populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:207, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveBeanByName:453, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    autowireResource:527, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
    getResource:497, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
    getResourceToInject:650, CommonAnnotationBeanPostProcessor$ResourceElement (org.springframework.context.annotation)
    inject:228, InjectionMetadata$InjectedElement (org.springframework.beans.factory.annotation)
    inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
    postProcessProperties:318, CommonAnnotationBeanPostProcessor (org.springframework.context.annotation)
    populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getObject:-1, 47925969 (org.springframework.beans.factory.support.AbstractBeanFactory$$Lambda$290)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Related reading

Background

Nacos is a service registration/discovery middleware.
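
A minimal sketch of fetching a configuration with the Nacos client (the server address, dataId and group here are placeholders for illustration):

import java.util.Properties;

import com.alibaba.nacos.api.NacosFactory;
import com.alibaba.nacos.api.config.ConfigService;

public class NacosConfigDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("serverAddr", "127.0.0.1:8848");   // assumed local Nacos server

        // Internally this builds a NacosConfigService / ClientWorker, as seen in the stack trace below
        ConfigService configService = NacosFactory.createConfigService(props);

        String content = configService.getConfig("demo-app.yaml", "DEFAULT_GROUP", 3000);
        System.out.println(content);
    }
}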

The stack trace for fetching configuration:

    run:744, ClientWorker$ConfigRpcTransportClient$4 (com.alibaba.nacos.client.config.impl)
    call:515, Executors$RunnableAdapter (java.util.concurrent)
    run$$$capture:264, FutureTask (java.util.concurrent)
    run:-1, FutureTask (java.util.concurrent)
    - Async stack trace
    <init>:151, FutureTask (java.util.concurrent)
    <init>:215, ScheduledThreadPoolExecutor$ScheduledFutureTask (java.util.concurrent)
    schedule:561, ScheduledThreadPoolExecutor (java.util.concurrent)
    startInternal:739, ClientWorker$ConfigRpcTransportClient (com.alibaba.nacos.client.config.impl)
    start:255, ConfigTransportClient (com.alibaba.nacos.client.config.impl)
    <init>:472, ClientWorker (com.alibaba.nacos.client.config.impl)
    <init>:81, NacosConfigService (com.alibaba.nacos.client.config)
    newInstance0:-2, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:62, NativeConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:45, DelegatingConstructorAccessorImpl (jdk.internal.reflect)
    newInstance:490, Constructor (java.lang.reflect)
    createConfigService:43, ConfigFactory (com.alibaba.nacos.api.config)
    createConfigService:44, NacosFactory (com.alibaba.nacos.api)
    createConfigService:55, NacosConfigManager (com.alibaba.cloud.nacos)
    <init>:43, NacosConfigManager (com.alibaba.cloud.nacos)
    nacosConfigManager:43, NacosConfigBootstrapConfiguration (com.alibaba.cloud.nacos)
    invoke0:-2, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:62, NativeMethodAccessorImpl (jdk.internal.reflect)
    invoke:43, DelegatingMethodAccessorImpl (jdk.internal.reflect)
    invoke:566, Method (java.lang.reflect)
    instantiate:154, SimpleInstantiationStrategy (org.springframework.beans.factory.support)
    instantiate:652, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:637, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    doResolveDependency:1307, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveAutowiredArgument:886, ConstructorResolver (org.springframework.beans.factory.support)
    createArgumentArray:790, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:540, ConstructorResolver (org.springframework.beans.factory.support)
    instantiateUsingFactoryMethod:1341, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBeanInstance:1181, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:556, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    resolveCandidate:276, DependencyDescriptor (org.springframework.beans.factory.config)
    addCandidateEntry:1525, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    findAutowireCandidates:1489, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveMultipleBeans:1378, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    doResolveDependency:1265, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveDependency:1227, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    resolveFieldValue:657, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:640, AutowiredAnnotationBeanPostProcessor$AutowiredFieldElement (org.springframework.beans.factory.annotation)
    inject:119, InjectionMetadata (org.springframework.beans.factory.annotation)
    postProcessProperties:399, AutowiredAnnotationBeanPostProcessor (org.springframework.beans.factory.annotation)
    populateBean:1425, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    doCreateBean:593, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    createBean:516, AbstractAutowireCapableBeanFactory (org.springframework.beans.factory.support)
    lambda$doGetBean$0:324, AbstractBeanFactory (org.springframework.beans.factory.support)
    getSingleton:234, DefaultSingletonBeanRegistry (org.springframework.beans.factory.support)
    doGetBean:322, AbstractBeanFactory (org.springframework.beans.factory.support)
    getBean:202, AbstractBeanFactory (org.springframework.beans.factory.support)
    preInstantiateSingletons:897, DefaultListableBeanFactory (org.springframework.beans.factory.support)
    finishBeanFactoryInitialization:879, AbstractApplicationContext (org.springframework.context.support)
    refresh:551, AbstractApplicationContext (org.springframework.context.support)
    refresh:755, SpringApplication (org.springframework.boot)
    refresh:747, SpringApplication (org.springframework.boot)
    refreshContext:402, SpringApplication (org.springframework.boot)
    run:312, SpringApplication (org.springframework.boot)
    run:140, SpringApplicationBuilder (org.springframework.boot.builder)
    bootstrapServiceContext:212, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
    onApplicationEvent:117, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
    onApplicationEvent:74, BootstrapApplicationListener (org.springframework.cloud.bootstrap)
    doInvokeListener:172, SimpleApplicationEventMulticaster (org.springframework.context.event)
    invokeListener:165, SimpleApplicationEventMulticaster (org.springframework.context.event)
    multicastEvent:139, SimpleApplicationEventMulticaster (org.springframework.context.event)
    multicastEvent:127, SimpleApplicationEventMulticaster (org.springframework.context.event)
    environmentPrepared:80, EventPublishingRunListener (org.springframework.boot.context.event)
    environmentPrepared:53, SpringApplicationRunListeners (org.springframework.boot)
    prepareEnvironment:342, SpringApplication (org.springframework.boot)
    run:307, SpringApplication (org.springframework.boot)
    loadContext:120, SpringBootContextLoader (org.springframework.boot.test.context)
    loadContextInternal:99, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    loadContext:124, DefaultCacheAwareContextLoaderDelegate (org.springframework.test.context.cache)
    getApplicationContext:123, DefaultTestContext (org.springframework.test.context.support)
    setUpRequestContextIfNecessary:190, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:132, ServletTestExecutionListener (org.springframework.test.context.web)
    prepareTestInstance:244, TestContextManager (org.springframework.test.context)
    createTest:227, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runReflectiveCall:289, SpringJUnit4ClassRunner$1 (org.springframework.test.context.junit4)
    run:12, ReflectiveCallable (org.junit.internal.runners.model)
    methodBlock:291, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:246, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    runChild:97, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:331, ParentRunner$4 (org.junit.runners)
    schedule:79, ParentRunner$1 (org.junit.runners)
    runChildren:329, ParentRunner (org.junit.runners)
    access$100:66, ParentRunner (org.junit.runners)
    evaluate:293, ParentRunner$2 (org.junit.runners)
    evaluate:61, RunBeforeTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:70, RunAfterTestClassCallbacks (org.springframework.test.context.junit4.statements)
    evaluate:306, ParentRunner$3 (org.junit.runners)
    run:413, ParentRunner (org.junit.runners)
    run:190, SpringJUnit4ClassRunner (org.springframework.test.context.junit4)
    run:137, JUnitCore (org.junit.runner)
    startRunnerWithArgs:69, JUnit4IdeaTestRunner (com.intellij.junit4)
    execute:38, IdeaTestRunner$Repeater$1 (com.intellij.rt.junit)
    repeat:11, TestsRepeater (com.intellij.rt.execution.junit)
    startRunnerWithArgs:35, IdeaTestRunner$Repeater (com.intellij.rt.junit)
    prepareStreamsAndStart:235, JUnitStarter (com.intellij.rt.junit)
    main:54, JUnitStarter (com.intellij.rt.junit)

Background

javac is the Java compiler. I am currently very interested in compiler theory, and also in type erasure and Java's type system, so I need to be able to debug javac.

How to do it

Java 9 introduced the module system. Before that, javac was shipped separately in tools.jar, which was responsible for compilation; since Java 9 the relevant code lives in the jdk.compiler module.
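
One convenient way to drive javac from your own code and then step into the com.sun.tools.javac.* classes of jdk.compiler with a debugger (a minimal sketch; the source path is illustrative):

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public class RunJavac {
    public static void main(String[] args) {
        // Returns the javac implementation shipped in the jdk.compiler module
        JavaCompiler javac = ToolProvider.getSystemJavaCompiler();

        // Compile one file; breakpoints set inside com.sun.tools.javac.* will be hit
        int exitCode = javac.run(null, null, null, "/tmp/Hello.java");
        System.out.println("javac exit code: " + exitCode);
    }
}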


Background

• Understand the lexical analysis of Java .java source files
• Understand the Java compilation process

    parser

Type checking:

src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Check.java

Lexical and syntactic analysis of Java source, which produces the parse tree:

src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java

A gdb session captured while javac writes out an erasure-related error:
    (gdb) p (char *) buf
    $3 = 0x7ffff5980170 "/home/dai/javademo/learn_java/generic/GenericMethod.java:8: error: name clash: sayHi(List<Integer>) and sayHi(List<String>) have the same erasure\n"
    (gdb) bt
    #0 __GI___libc_write (fd=fd@entry=2, buf=buf@entry=0x7ffff5980170, nbytes=nbytes@entry=146) at ../sysdeps/unix/sysv/linux/write.c:25
    #1 0x00007ffff5851e28 in handleWrite (fd=2, buf=buf@entry=0x7ffff5980170, len=len@entry=146) at /home/dai/jdk/src/java.base/unix/native/libjava/io_util_md.c:196
    #2 0x00007ffff58518ba in writeBytes (env=0x7ffff00295d0, this=0x7ffff5982290, bytes=<optimized out>, off=0, len=146, append=<optimized out>, fid=0xd08e043)
    at /home/dai/jdk/src/java.base/share/native/libjava/io_util.c:189
    #3 0x00007ffff584a2ab in Java_java_io_FileOutputStream_writeBytes (env=<optimized out>, this=<optimized out>, bytes=<optimized out>, off=<optimized out>, len=<optimized out>, append=<optimized out>)
    at /home/dai/jdk/src/java.base/share/native/libjava/FileOutputStream.c:70
    #4 0x00007fffe100f6cb in ?? ()
    #5 0x00007ffff7d41000 in ?? ()
    #6 0x0000555555581520 in ?? ()
    #7 0x00007ffff00292f0 in ?? ()
    #8 0x00007fffb4d44e58 in ?? ()
    #9 0x00007fffe100f199 in ?? ()
    #10 0x00007ffff5982208 in ?? ()
    #11 0x00007fffb41f2960 in ?? ()
    #12 0x00007ffff5982290 in ?? ()
    #13 0x00007fffb41f3b38 in ?? ()
    #14 0x0000000000000000 in ?? ()

Related reading

Background

Understand the memory model.

• Preventing reordering at compile time. The GCC documentation describes the "memory" clobber as a compiler-level barrier:

    "memory"
    The "memory" clobber tells the compiler that the assembly code performs memory reads or writes to items other than those listed in the input and output operands (for example, accessing the memory pointed to by one of the input parameters). To ensure memory contains correct values, GCC may need to flush specific register values to memory before executing the asm. Further, the compiler does not assume that any values read from memory before an asm remain unchanged after that asm; it reloads them as needed. Using the "memory" clobber effectively forms a read/write memory barrier for the compiler.

    Note that this clobber does not prevent the processor from doing speculative reads past the asm statement. To prevent that, you need processor-specific fence instructions.

False sharing

Demo:

#include <thread>

// 128-byte aligned array of counters; which elements the threads touch is
// controlled by the stride below.
alignas(128) volatile int counter[1024]{};

void update(int idx) {
    for (int j = 0; j < 100000000; j++) ++counter[idx];
}

// SIZE comes from the compiler command line (-DSIZE=...). With -DSIZE=4 all four
// threads increment adjacent ints on the same cache line (false sharing);
// with -DSIZE=64 each thread's slot is 64 bytes apart, on its own cache line.
static const int stride = SIZE / sizeof(counter[0]);

int main() {
    std::thread t1(update, 0 * stride);
    std::thread t2(update, 1 * stride);
    std::thread t3(update, 2 * stride);
    std::thread t4(update, 3 * stride);
    t1.join();
    t2.join();
    t3.join();
    t4.join();
}

Build and run under perf:

    g++ -DSIZE=64 -pthread -O2 cacheline.c  && perf stat -etask-clock,context-switches,cpu-migrations,cycles -r20 ./a.out

Related reading

Background

Spring Boot is built with Gradle, so I need to learn how to use Gradle.

Download and install

Gradle can be downloaded here.

Configure the environment variables

• Windows

The downloaded archive before extraction: [screenshot: gradle zip]

The path after extraction: [screenshot: gradle directory]

The environment variable settings: [screenshot: gradle environment]

Verify the installation:

    $ gradle -version

    Welcome to Gradle 7.6!

    Here are the highlights of this release:
    - Added support for Java 19.
    - Introduced `--rerun` flag for individual task rerun.
    - Improved dependency block for test suites to be strongly typed.
    - Added a pluggable system for Java toolchains provisioning.

    For more details see https://docs.gradle.org/7.6/release-notes.html


    ------------------------------------------------------------
    Gradle 7.6
    ------------------------------------------------------------

    Build time: 2022-11-25 13:35:10 UTC
    Revision: daece9dbc5b79370cc8e4fd6fe4b2cd400e150a8

    Kotlin: 1.7.10
    Groovy: 3.0.13
    Ant: Apache Ant(TM) version 1.10.11 compiled on July 10 2021
    JVM: 11 (Oracle Corporation 11+28)
    OS: Windows 10 10.0 amd64

A first Gradle project

Source: the official Gradle documentation.

    $ mkdir demo
    $ cd demo
    $ gradle init
    Starting a Gradle Daemon (subsequent builds will be faster)
<-------------> 0% INITIALIZING

    Select type of project to generate:
    1: basic
    2: application
    3: library
    4: Gradle plugin
    Enter selection (default: basic) [1..4] 2

    Select implementation language:
1: C++
2: Groovy
    3: Java
    4: Kotlin
    5: Scala
    6: Swift
    Enter selection (default: Java) [1..6] 3

    Split functionality across multiple subprojects?:
1: no - only one application project
2: yes - application and library projects
    Enter selection (default: no - only one application project) [1..2] 1

    Select build script DSL:
1: Groovy
2: Kotlin
    Enter selection (default: Groovy) [1..2] 1

    Generate build using new APIs and b
    Select test framework:
1: JUnit 4
2: TestNG
    3: Spock
    4: JUnit Jupiter
    Enter selection (default: JUnit Jupiter) [1..4] 1

    Project name (default: demo):
    Source package (default: demo):

> Task :init
Get more help with your project: https://docs.gradle.org/7.6/samples/sample_building_java_applications.html

    BUILD SUCCESSFUL in 1m 13s
    2 actionable tasks: 2 executed

Directory layout:

    ├─.gradle
    ├─app
    │ └─src
    │ ├─main
    │ │ ├─java
    │ │ │ └─demo
    │ │ └─resources
    │ └─test
    │ ├─java
    │ │ └─demo
    │ └─resources
    └─gradle
    └─wrapper

Background

Build Spring Boot from source.

Process

The GitHub project page explains how to build it.

• Download the code and build it:

## download the code
git clone https://github.com/spring-projects/spring-boot.git
## change into the directory
cd spring-boot
## build
./gradlew



If downloading packages hosted abroad is slow, a proxy can be added by editing the build script:

    vim build.gradle

Where are the built jars? They end up in each submodule's build/libs directory:

    $ tree spring-boot-project/spring-boot/build/libs/
    spring-boot-project/spring-boot/build/libs/
    ├── spring-boot-3.0.1-SNAPSHOT.jar
    └── spring-boot-3.0.1-SNAPSHOT-sources.jar

Spring Boot startup

Startup via the Maven plugin:
spring-boot-project/spring-boot-tools/spring-boot-maven-plugin/src/main/java/org/springframework/boot/maven/AbstractRunMojo.java

@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (this.skip) {
        getLog().debug("skipping run as per configuration.");
        return;
    }
    String startClass = (this.mainClass != null) ? this.mainClass
            : SpringBootApplicationClassFinder.findSingleClass(this.classesDirectory); // find the main class
    run(startClass); // launch it
}

Related reading

Background

In production we hit a NullPointerException from ConcurrentHashMap; it turns out that ConcurrentHashMap does not allow get or put with a null key or value.
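
A minimal reproduction: HashMap tolerates null keys and values, while ConcurrentHashMap rejects both.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ChmNullDemo {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("key", null);               // fine: HashMap allows null values (and one null key)
        System.out.println(hashMap.get("key")); // prints null

        Map<String, String> chm = new ConcurrentHashMap<>();
        chm.get(null);                          // throws NullPointerException
        chm.put("key", null);                   // would also throw NullPointerException
    }
}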


Background

Understand how Java thread pools work.

Thread pools

In Java, threads are ultimately created via pthread_create (on Linux); a thread pool is a management layer wrapped around those threads.
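
A minimal sketch of that management layer: ThreadPoolExecutor creates and reuses a small set of worker threads (each backed by a native thread) and feeds them tasks from a queue.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolDemo {
    public static void main(String[] args) {
        // 2 core threads, at most 4, extra idle threads die after 60s, queue holds up to 100 tasks
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                2, 4, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(100));

        for (int i = 0; i < 10; i++) {
            final int n = i;
            pool.execute(() -> System.out.println(Thread.currentThread().getName() + " ran task " + n));
        }

        pool.shutdown(); // workers exit once the queue drains
    }
}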

Related reading

Background

Understand how a Java application is packaged and where its entry point is.

Example

A Spring Boot jar from my work environment has the following MANIFEST.MF after packaging:

    Manifest-Version: 1.0
    Created-By: Maven Jar Plugin 3.2.0
    Build-Jdk-Spec: 11
    Implementation-Title: mdp-biz-engine-rest
    Implementation-Version: 3.0.0-SNAPSHOT
    Main-Class: org.springframework.boot.loader.JarLauncher
    Start-Class: com.xxx.Application
    Spring-Boot-Version: 2.3.12.RELEASE
    Spring-Boot-Classes: BOOT-INF/classes/
    Spring-Boot-Lib: BOOT-INF/lib/
    Spring-Boot-Classpath-Index: BOOT-INF/classpath.idx
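
The Main-Class (JarLauncher) is what java -jar actually starts, and it then loads the Start-Class. A small sketch for inspecting those attributes programmatically (the jar path is whatever jar you point it at):

import java.util.jar.JarFile;
import java.util.jar.Manifest;

public class ReadManifest {
    public static void main(String[] args) throws Exception {
        try (JarFile jar = new JarFile(args[0])) {          // e.g. a packaged Spring Boot jar
            Manifest mf = jar.getManifest();
            System.out.println("Main-Class:  " + mf.getMainAttributes().getValue("Main-Class"));
            System.out.println("Start-Class: " + mf.getMainAttributes().getValue("Start-Class"));
        }
    }
}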

Related reading

Background

I ran into a Java int overflow problem and wanted to understand how Java handles overflow of its numeric types:

    jshell> 2147483647 + 1
    $3 ==> -2147483648

Confirming the rule in the JLS

For the + operation, what happens when the result overflows?

A two's-complement number has two parts: a sign bit and value bits. For an overflowing result, the rule is:

• Sign bit: the sign of the result is the opposite of the sign of the mathematical sum.
• Value bits: the low-order bits of the mathematical sum, represented in a sufficiently large two's-complement format.

The JLS text:

    If an integer addition overflows, then the result is the low-order bits of the
    mathematical sum as represented in some sufficiently large two's-complement
    format. If overflow occurs, then the sign of the result is not the same as the sign of
    the mathematical sum of the two operand values.

Working through the example

    2147483647 + 1 

Here both 2147483647 and 1 are int literals, and the + operation overflows.

The decimal value 2147483647 is 0x7fffffff in hexadecimal.

In a sufficiently wide two's-complement format, 2147483647 + 1 = 2147483648, which is 0x80000000, i.e. ...0000 1000 0000 0000 0000 0000 0000 0000 in binary.

So:
the low-order 32 bits are a 1 followed by 31 zeros;
the sign bit of that int is 1 (negative), the opposite of the sign of the mathematical sum;
and that bit pattern, interpreted as an int, is -2147483648.
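
The same result can be checked from code; Math.addExact is the standard-library alternative that throws instead of silently wrapping.

public class OverflowDemo {
    public static void main(String[] args) {
        int sum = Integer.MAX_VALUE + 1;                  // wraps around silently
        System.out.println(sum);                          // -2147483648
        System.out.println(Integer.toBinaryString(sum));  // 10000000000000000000000000000000

        System.out.println(Math.addExact(2147483647, 1)); // throws ArithmeticException: integer overflow
    }
}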

Related reading

Background

A collection of recurring Spring Boot questions.

Annotations

Between the @Bean and @Component annotations, which definition takes priority for injection?

It seems to depend on the implementation, and it may vary by version.

(Reference: which of the bean and component annotations takes priority for injection)

Using @Component

@Component goes on a class, while @Bean goes on a method; @Bean is the more flexible of the two.
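
A minimal sketch of the two styles (the class names are illustrative; in a real application you would normally pick one style per bean, not both for the same type):

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Component;

// @Component: the class itself is picked up by component scanning
@Component
class OrderValidator {
}

// @Bean: the method decides how the instance is built, which also works
// for third-party types whose source you cannot annotate
@Configuration
class AppConfig {

    @Bean
    OrderValidator orderValidator() {
        return new OrderValidator();
    }
}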


[Component](https://www.baeldung.com/spring-component-annotation)


Background

Our new project uses Mockito to mock data, so I need to learn how to use it.

Usage

How do you use it?

The official site https://site.mockito.org/ shows how to use it.
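
A minimal sketch of the typical stub-and-verify pattern (assuming mockito-core is on the classpath):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.List;

public class MockitoSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        List<String> mockedList = mock(List.class);

        when(mockedList.get(0)).thenReturn("hello"); // stub a call
        System.out.println(mockedList.get(0));       // prints "hello"

        verify(mockedList).get(0);                   // fails if get(0) was never called
    }
}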


Background

How do you create a Maven plugin?

Steps

Use the Maven archetype to create a plugin named hello-maven-plugin:

    mvn archetype:generate \
    -DgroupId=sample.plugin \
    -DartifactId=hello-maven-plugin \
    -DarchetypeGroupId=org.apache.maven.archetypes \
    -DarchetypeArtifactId=maven-archetype-plugin

The generated tree:

    $ tree  .
    .
    └── hello-maven-plugin
    ├── pom.xml
    └── src
    ├── it
    │ ├── settings.xml
    │ └── simple-it
    │ ├── pom.xml
    │ └── verify.groovy
    └── main
    └── java
    └── sample
    └── plugin
    └── MyMojo.java

You can see that a hello-maven-plugin directory was created, containing a pom.xml.

This is the core of that pom:

    <groupId>sample.plugin</groupId>
    <artifactId>hello-maven-plugin</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>maven-plugin</packaging>

    <name>hello-maven-plugin Maven Plugin</name>
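
The generated MyMojo.java is where the plugin's logic goes; a minimal sketch of what such a Mojo looks like (the goal name "sayhi" is illustrative, the archetype's own example uses a different goal):

import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Mojo;

// Invoked as:  mvn sample.plugin:hello-maven-plugin:1.0-SNAPSHOT:sayhi
@Mojo(name = "sayhi")
public class MyMojo extends AbstractMojo {

    @Override
    public void execute() throws MojoExecutionException {
        getLog().info("Hello, world.");
    }
}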

Related reading

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/placeholder b/placeholder deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sitemap.txt b/sitemap.txt new file mode 100644 index 0000000000..899a196a81 --- /dev/null +++ b/sitemap.txt @@ -0,0 +1,330 @@ +https://shakudada.xyz/2023/10/27/clickhouse-%E7%89%A9%E5%8C%96%E8%A7%86%E5%9B%BE/ +https://shakudada.xyz/2023/08/15/java-juc/ +https://shakudada.xyz/tags/index.html +https://shakudada.xyz/2021/03/17/%E8%AE%BA%E6%96%87%E7%9A%84%E6%9C%89%E8%B6%A3%E6%80%A7/ +https://shakudada.xyz/2021/03/31/%E8%B7%B3%E8%A1%A8/ +https://shakudada.xyz/2021/05/24/%E9%9C%8D%E5%B0%94%E9%80%BB%E8%BE%91-%E4%BB%8E%E5%BF%AB%E6%8E%92%E5%BC%80%E5%A7%8B/ +https://shakudada.xyz/2020/05/18/%E8%8C%83%E7%95%B4%E5%92%8C%E7%B1%B3%E7%94%B0%E5%BC%95%E7%90%86/ +https://shakudada.xyz/2020/03/15/%E8%A7%84%E5%88%99%E7%B3%BB%E7%BB%9F/ +https://shakudada.xyz/2021/11/30/%E7%A7%9F%E7%BA%A6/ +https://shakudada.xyz/2020/11/27/%E7%BA%A6%E6%9D%9F%E5%92%8C%E7%BB%93%E6%9E%84/ +https://shakudada.xyz/2020/02/20/%E7%BC%96%E8%AF%91%E5%8E%9F%E7%90%86/ +https://shakudada.xyz/2022/12/16/%E7%BC%96%E8%AF%91sping-boot/ +https://shakudada.xyz/2022/03/30/%E8%8C%83%E5%9E%8B%E6%A3%80%E6%9F%A5/ +https://shakudada.xyz/2019/11/30/%E6%AD%A3%E5%88%99%E6%A8%A1%E5%BC%8F/ +https://shakudada.xyz/2020/12/07/%E6%B3%9B%E5%9E%8B/ +https://shakudada.xyz/2019/09/21/%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E6%98%AF%E4%BB%80%E4%B9%88/ +https://shakudada.xyz/2020/08/11/%E6%9C%80%E5%A4%A7%E7%86%B5/ +https://shakudada.xyz/2021/10/10/%E6%97%B6%E9%97%B4%E8%BD%AE%E7%AE%97%E6%B3%95/ +https://shakudada.xyz/2020/12/25/%E5%BD%A2%E5%BC%8F%E5%8C%96%E8%AF%AD%E4%B9%89/ +https://shakudada.xyz/2020/09/27/%E5%BE%AA%E7%8E%AF%E4%B8%8D%E5%8F%98%E5%BC%8Floop-invariants/ +https://shakudada.xyz/2021/03/15/%E5%BF%83%E8%B7%B3%E5%92%8Ctcp/ +https://shakudada.xyz/2020/10/19/%E6%84%9F%E7%9F%A5%E6%9C%BA/ +https://shakudada.xyz/2020/09/01/%E6%88%91%E7%9A%84es%E4%B9%8B%E8%B7%AF/ +https://shakudada.xyz/2020/11/12/%E6%95%B0%E7%90%86%E9%80%BB%E8%BE%91/ +https://shakudada.xyz/2021/03/05/%E5%A6%82%E4%BD%95%E5%86%99%E4%B8%80%E4%B8%AA%E6%AD%A3%E7%A1%AE%E7%9A%84%E4%BB%A3%E7%A0%81/ +https://shakudada.xyz/2021/05/24/%E5%A0%86%E6%A0%88/ +https://shakudada.xyz/2020/04/27/%E5%AD%97%E7%AC%A6%E4%B8%B2%E5%88%B0%E4%BB%A3%E7%A0%81/ +https://shakudada.xyz/2020/07/13/%E5%BB%B6%E8%BF%9F%E6%B1%82%E5%80%BC/ +https://shakudada.xyz/2020/06/01/%E4%BE%9D%E8%B5%96%E5%92%8C%E5%86%B2%E7%AA%81/ +https://shakudada.xyz/2020/06/18/%E5%8F%8C%E5%90%91%E7%BB%91%E5%AE%9A/ +https://shakudada.xyz/2019/12/09/%E5%8D%8F%E7%A8%8B%E5%88%87%E6%8D%A2/ +https://shakudada.xyz/2020/06/11/%E5%8F%AF%E6%89%A9%E5%B1%95%E6%80%A7/ +https://shakudada.xyz/2022/11/09/zookeeper-connetion-loss/ +https://shakudada.xyz/2020/05/06/%E4%B8%80%E4%B8%AAsql%E7%9A%84%E7%BB%84%E6%88%90/ +https://shakudada.xyz/2021/03/22/%E4%B8%80%E6%AC%A1tcp%E9%94%99%E8%AF%AF%E6%8E%92%E6%9F%A5/ +https://shakudada.xyz/2022/05/20/%E4%BD%BF%E7%94%A8gtest/ +https://shakudada.xyz/2023/01/15/%E4%BD%BF%E7%94%A8k8s-%E6%90%AD%E5%BB%BAredis-%E9%9B%86%E7%BE%A4/ +https://shakudada.xyz/2023/04/20/volatile-java-%E5%AE%9E%E7%8E%B0/ +https://shakudada.xyz/2023/06/09/why-bison-can-be-find-in-cmake/ +https://shakudada.xyz/2021/05/21/vxlan/ +https://shakudada.xyz/2022/11/21/xid-equal-to-close-xid/ +https://shakudada.xyz/2023/02/28/utf8-encoding-and-java/ +https://shakudada.xyz/2022/11/30/tomcat-%E7%BC%96%E8%AF%91/ +https://shakudada.xyz/2022/12/26/thread-pool/ 
+https://shakudada.xyz/2021/01/06/three-value-prediate/ +https://shakudada.xyz/2020/10/28/todolist/ +https://shakudada.xyz/2020/05/27/tired/ +https://shakudada.xyz/2021/04/01/tcp%E5%8D%8F%E8%AE%AE/ +https://shakudada.xyz/2023/03/23/tersorflow-%E5%85%A5%E9%97%A8/ +https://shakudada.xyz/2019/12/04/stop/ +https://shakudada.xyz/2021/07/15/tcp-nodelay/ +https://shakudada.xyz/2020/12/17/tcp/ +https://shakudada.xyz/2019/09/14/tcpdump-resp/ +https://shakudada.xyz/2022/12/16/spring-boot-repackage-%E5%92%8C%E5%85%A5%E5%8F%A3/ +https://shakudada.xyz/2022/12/15/spring-boot-%E5%9F%BA%E7%A1%80/ +https://shakudada.xyz/2022/12/07/spring-boot/ +https://shakudada.xyz/2022/11/29/springboot-%E8%AF%B7%E6%B1%82%E6%B5%81%E7%A8%8B/ +https://shakudada.xyz/2020/07/21/sql-join/ +https://shakudada.xyz/2020/03/16/ssa-optimistic/ +https://shakudada.xyz/2021/06/22/skiplist/ +https://shakudada.xyz/2021/12/30/roaring-bitmap/ +https://shakudada.xyz/2023/10/20/simpleDatetimeformatter-vs-datetimeformatter/ +https://shakudada.xyz/2021/08/09/redis-%E4%B8%BB%E4%BB%8E%E5%88%87%E6%8D%A2%E5%92%8C%E9%AB%98%E5%8F%AF%E7%94%A8/ +https://shakudada.xyz/2020/05/11/raft/ +https://shakudada.xyz/2021/09/25/redis/ +https://shakudada.xyz/2021/09/26/redis-cluster/ +https://shakudada.xyz/2022/12/29/redission-%E8%B0%83%E7%94%A8%E6%B5%81%E7%A8%8B/ +https://shakudada.xyz/2021/10/15/rabbitmq-ack-reject/ +https://shakudada.xyz/2022/12/22/rabbitmq-spring-boot/ +https://shakudada.xyz/2021/11/04/rabbitmq%E5%BF%83%E8%B7%B3%E9%97%AE%E9%A2%98%E5%92%8Cphp/ +https://shakudada.xyz/2021/11/09/rabbit%E6%B5%81%E7%A8%8B/ +https://shakudada.xyz/2019/11/27/php-tokenlizer/ +https://shakudada.xyz/2020/01/07/php-%E5%8F%8D%E5%B0%84/ +https://shakudada.xyz/2019/10/19/php-try-catch/ +https://shakudada.xyz/2020/12/21/pushdown/ +https://shakudada.xyz/2019/12/11/php-cgi-windows-curl/ +https://shakudada.xyz/2021/01/06/pdf-format/ +https://shakudada.xyz/2019/09/12/php-imply-cast/ +https://shakudada.xyz/2020/05/08/php-opcode-to-handler/ +https://shakudada.xyz/2019/12/12/php-pdo-%E7%9B%B8%E5%85%B3%E5%8F%82%E6%95%B0/ +https://shakudada.xyz/2022/08/16/paper/ +https://shakudada.xyz/2023/09/19/nginx-temp-proxy-%E6%9D%83%E9%99%90%E5%AF%BC%E8%87%B4%E6%8A%A5%E9%94%99/ +https://shakudada.xyz/2020/10/21/parser/ +https://shakudada.xyz/2021/01/28/mysql%E7%9A%84select/ +https://shakudada.xyz/2023/06/14/nacos-client-and-serve/ +https://shakudada.xyz/2019/12/08/namespace%E4%B8%8Edocker/ +https://shakudada.xyz/2022/12/22/nacos-%E8%8E%B7%E5%8F%96%E9%85%8D%E7%BD%AE/ +https://shakudada.xyz/2021/04/14/mysql%E4%B8%BB%E4%BB%8E/ +https://shakudada.xyz/2021/04/28/mysql%E6%8F%A1%E6%89%8B/ +https://shakudada.xyz/2021/03/27/mysql-%E4%B8%BB%E4%BB%8E%E5%A4%8D%E5%88%B6/ +https://shakudada.xyz/2021/03/11/mysqlbinlog/ +https://shakudada.xyz/2019/12/05/mysql-%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2/ +https://shakudada.xyz/2019/12/12/mysql%E4%B8%A5%E6%A0%BC%E6%A8%A1%E5%BC%8F/ +https://shakudada.xyz/2021/05/11/mysql-binlog%E8%8E%B7%E5%8F%96/ +https://shakudada.xyz/2019/12/23/mysql-error-sqlstate/ +https://shakudada.xyz/2019/10/08/mysql-string-max-length/ +https://shakudada.xyz/2019/11/28/mysql-explain-impossible-condition/ +https://shakudada.xyz/2020/11/12/mvcc/ +https://shakudada.xyz/2023/08/15/mybatisplus-Column-status-cannot-be-null/ +https://shakudada.xyz/2022/12/12/mybatis-dollor-and-sharp/ +https://shakudada.xyz/2021/07/29/mysql-5-7-in-%E7%9A%84%E4%BC%98%E5%8C%96%E5%BC%95%E8%B5%B7%E7%9A%84bug/ +https://shakudada.xyz/2021/02/18/mvcc-translate/ +https://shakudada.xyz/2022/12/12/mockito-%E4%BD%BF%E7%94%A8/ 
+https://shakudada.xyz/2021/06/04/mongoinsert/ +https://shakudada.xyz/2022/12/04/micro-k8s-%E4%BD%BF%E7%94%A8/ +https://shakudada.xyz/2022/12/19/memory-model/ +https://shakudada.xyz/2023/06/27/milvus-%E7%BC%96%E8%AF%91%E4%BD%BF%E7%94%A8/ +https://shakudada.xyz/2020/03/10/math/ +https://shakudada.xyz/2022/12/27/maven-scope/ +https://shakudada.xyz/2019/09/28/maven%E6%89%93%E5%8C%85NoClassDefFoundError-on-Maven-dependency/ +https://shakudada.xyz/2022/09/05/max-min-heap/ +https://shakudada.xyz/2023/07/04/lucene-%E5%88%86%E8%AF%8D/ +https://shakudada.xyz/2023/06/19/lucene-%E6%90%9C%E7%B4%A2%E8%BF%87%E7%A8%8B/ +https://shakudada.xyz/2022/06/19/lucene-%E7%BC%96%E8%AF%91%E5%AE%89%E8%A3%85/ +https://shakudada.xyz/2021/10/21/lucence%E6%BA%90%E7%A0%81%E5%88%86%E6%9E%90/ +https://shakudada.xyz/2023/10/19/lsmtree/ +https://shakudada.xyz/2022/06/27/lucene-10%E6%BA%90%E7%A0%81%E5%88%86%E6%9E%90/ +https://shakudada.xyz/2022/08/19/lucene-tim%E6%A0%BC%E5%BC%8F/ +https://shakudada.xyz/2019/10/18/lex%E5%92%8Cyacc%E4%BE%8B%E5%AD%90/ +https://shakudada.xyz/2021/10/18/llvm/ +https://shakudada.xyz/2023/07/05/llvm-ir-%E4%BE%8B%E5%AD%90/ +https://shakudada.xyz/2022/03/27/lr-parser/ +https://shakudada.xyz/2023/07/13/kmp-correct/ +https://shakudada.xyz/2019/10/02/learn-es-invert-index/ +https://shakudada.xyz/2023/04/14/kafka%E7%BC%96%E8%AF%91%E5%92%8C%E5%90%AF%E5%8A%A8-1/ +https://shakudada.xyz/2020/06/12/js-define%E5%87%BD%E6%95%B0/ +https://shakudada.xyz/2019/12/12/js-vue%E5%9F%BA%E7%A1%80/ +https://shakudada.xyz/2022/04/12/jvm%E7%BA%BF%E7%A8%8B%E5%AE%9E%E7%8E%B0/ +https://shakudada.xyz/2023/02/13/jstak/ +https://shakudada.xyz/2022/11/16/jdk-%E5%8F%8D%E6%B1%87%E7%BC%96/ +https://shakudada.xyz/2023/08/10/jdbc-Communications-link-failure/ +https://shakudada.xyz/2022/02/18/jdk-%E7%BC%96%E8%AF%91/ +https://shakudada.xyz/2022/09/20/jdk%E7%BC%96%E8%AF%91/ +https://shakudada.xyz/2022/12/26/java%E5%9F%BA%E7%A1%80/ +https://shakudada.xyz/2019/11/14/java%E7%9A%84package%E4%B8%8E%E6%96%87%E4%BB%B6%E8%B7%AF%E5%BE%84/ +https://shakudada.xyz/2022/04/12/java%E7%B1%BB%E5%88%9D%E5%A7%8B%E5%8C%96/ +https://shakudada.xyz/2023/05/27/java-%E7%BA%BF%E7%A8%8B%E6%B1%A0/ +https://shakudada.xyz/2022/04/12/java%E5%92%8Cspringboot/ +https://shakudada.xyz/2022/11/29/javac/ +https://shakudada.xyz/2023/02/14/java-%E5%B8%B8%E7%94%A8%E5%91%BD%E4%BB%A4/ +https://shakudada.xyz/2023/05/24/java-%E6%95%B0%E7%BB%84%E5%A3%B0%E6%98%8E%E4%BD%8D%E7%BD%AE%E5%8C%BA%E5%88%AB/ +https://shakudada.xyz/2019/11/26/java-%E5%BC%82%E5%B8%B8/ +https://shakudada.xyz/2022/12/01/java-%E6%96%B9%E6%B3%95%E7%AD%BE%E5%90%8D/ +https://shakudada.xyz/2023/04/03/java-%E5%9F%BA%E6%9C%AC%E7%B1%BB%E5%9E%8B/ +https://shakudada.xyz/2022/09/21/java-%E5%A0%86%E6%A0%88/ +https://shakudada.xyz/2023/01/11/java-%E5%AF%B9%E8%B1%A1%E5%A4%A7%E5%B0%8F/ +https://shakudada.xyz/2023/01/11/java-%E4%B8%80%E6%AC%A1gc%E6%8E%92%E6%9F%A5/ +https://shakudada.xyz/2023/09/21/java-%E4%B8%9A%E5%8A%A1oom%E6%8E%92%E6%9F%A5/ +https://shakudada.xyz/2022/11/30/java-%E4%BD%BF%E7%94%A8lua-script/ +https://shakudada.xyz/2022/12/21/java-%E5%8A%A8%E6%80%81%E4%BB%A3%E7%90%86/ +https://shakudada.xyz/2022/12/06/java-volalite/ +https://shakudada.xyz/2023/03/29/java-unsafe/ +https://shakudada.xyz/2023/10/25/java-wait-notify/ +https://shakudada.xyz/2022/09/11/java-thread-local-%E5%88%9D%E5%A7%8B%E5%8C%96%E6%97%B6%E6%9C%BA/ +https://shakudada.xyz/2022/12/16/java-thread-pool/ +https://shakudada.xyz/2023/08/25/java-unbox/ +https://shakudada.xyz/2023/06/27/java-sort-default-order/ 
+https://shakudada.xyz/2022/08/23/java-static-%E5%9D%97/ +https://shakudada.xyz/2019/11/25/java-string-%E7%9B%B8%E5%85%B3%E5%86%85%E5%AE%B9/ +https://shakudada.xyz/2023/09/22/java-oom-hprof%E6%96%87%E4%BB%B6%E7%94%9F%E6%88%90%E6%97%B6%E6%9C%BA/ +https://shakudada.xyz/2022/12/20/java-parser/ +https://shakudada.xyz/2023/04/14/java-rabbitmq-%E5%88%9D%E5%A7%8B%E5%8C%96/ +https://shakudada.xyz/2022/12/07/java-redis-client/ +https://shakudada.xyz/2023/10/18/java-mybatis-plus-date-handler/ +https://shakudada.xyz/2023/08/24/java-main/ +https://shakudada.xyz/2023/08/04/java-nio/ +https://shakudada.xyz/2022/12/15/java-int-overflow-%E6%8E%A2%E7%A9%B6/ +https://shakudada.xyz/2023/04/18/java-integer-divison/ +https://shakudada.xyz/2023/09/06/java-jdk-%E4%B8%BA%E4%BB%80%E4%B9%88%E4%B8%80%E4%B8%AA%E7%BA%BF%E7%A8%8B%E7%A9%BA%E6%8C%87%E9%92%88%E4%B8%8D%E9%80%80%E5%87%BA/ +https://shakudada.xyz/2022/11/30/java-assert/ +https://shakudada.xyz/2023/07/30/java-generic/ +https://shakudada.xyz/2023/09/06/java-branch-bytecode/ +https://shakudada.xyz/2020/09/25/invariants/ +https://shakudada.xyz/2023/05/04/insert-ignore-%E6%AD%BB%E9%94%81/ +https://shakudada.xyz/2019/12/12/java-Class-forName/ +https://shakudada.xyz/2021/03/30/ipc/ +https://shakudada.xyz/2022/12/22/java-arraycopy/ +https://shakudada.xyz/2019/12/02/ik%E5%88%86%E8%AF%8D/ +https://shakudada.xyz/2019/10/13/https-tls-ssl/ +https://shakudada.xyz/2020/03/04/induction/ +https://shakudada.xyz/2019/09/13/hello-world/ +https://shakudada.xyz/2023/07/10/hidden-and-shadow-in-java/ +https://shakudada.xyz/2022/12/21/how-to-debug-javac/ +https://shakudada.xyz/2022/09/06/httpServerletRequest-autowired-%E5%8E%9F%E5%9B%A0/ +https://shakudada.xyz/2022/12/19/gradle-%E4%BD%BF%E7%94%A8/ +https://shakudada.xyz/2019/09/21/hello-world-java/ +https://shakudada.xyz/2020/07/10/group-concat%E7%9C%8Bmysql%E5%87%BD%E6%95%B0/ +https://shakudada.xyz/2019/09/12/golang-lock/ +https://shakudada.xyz/2020/04/10/golang-stack/ +https://shakudada.xyz/2019/09/18/golang-interface-%E6%AF%94%E8%BE%83/ +https://shakudada.xyz/2020/06/01/functor-1/ +https://shakudada.xyz/2019/09/17/go-micro-hello-world/ +https://shakudada.xyz/2020/05/28/functor/ +https://shakudada.xyz/2023/10/25/fst/ +https://shakudada.xyz/2023/05/25/fst-%E7%BB%93%E6%9E%84/ +https://shakudada.xyz/2023/07/04/found-duplicate-key-xxx-spring-boot/ +https://shakudada.xyz/2023/05/16/flink-%E7%BC%96%E8%AF%91/ +https://shakudada.xyz/2023/09/10/elastic-search-%E7%BC%96%E8%AF%91%E5%92%8C%E8%B0%83%E8%AF%95/ +https://shakudada.xyz/2022/04/02/dubbo-rpc/ +https://shakudada.xyz/2021/03/27/docker%E6%8C%81%E4%B9%85%E5%8C%96/ +https://shakudada.xyz/2023/03/09/each-jvm-bytecode-implement-in-x86-with-asm/ +https://shakudada.xyz/2021/05/11/c%E5%AD%97%E8%8A%82%E5%AF%B9%E9%BD%90/ +https://shakudada.xyz/2023/04/09/direct-memory-in-java/ +https://shakudada.xyz/2021/03/10/docker-compose-spec/ +https://shakudada.xyz/2019/11/26/docker%E4%B8%8Eiptable/ +https://shakudada.xyz/2020/06/12/curry/ +https://shakudada.xyz/2022/01/04/croaring-bitmap/ +https://shakudada.xyz/2020/09/18/crf/ +https://shakudada.xyz/2019/09/19/cors-%E7%9B%B8%E5%85%B3/ +https://shakudada.xyz/2022/12/10/create-a-maven-plugin/ +https://shakudada.xyz/2022/10/24/cpp-flag/ +https://shakudada.xyz/2020/04/15/coding/ +https://shakudada.xyz/2019/09/15/compile-and-debug-linux-kernel/ +https://shakudada.xyz/2019/11/21/composer-ext/ +https://shakudada.xyz/2019/10/22/composer%E7%9A%84psr4/ +https://shakudada.xyz/2023/10/16/clickhouse-%E7%89%A9%E5%8C%96%E8%A7%86%E5%9B%BE%E5%92%8C%E4%BD%8D%E5%9B%BE/ 
+https://shakudada.xyz/2022/05/22/clickhouse-%E8%AF%B7%E6%B1%82%E7%9A%84%E7%94%9F%E5%91%BD%E5%91%A8%E6%9C%9F/ +https://shakudada.xyz/2019/12/19/clickhouse-%E7%BC%96%E8%AF%91%E5%AE%89%E8%A3%85/ +https://shakudada.xyz/2022/12/26/cms-gc/ +https://shakudada.xyz/2022/07/06/clickhouse-parser/ +https://shakudada.xyz/2023/02/21/clickhouse-mybatis-batch-insert-cpu-raise-up/ +https://shakudada.xyz/2022/05/15/clickhouse-%E5%BB%BA%E8%A1%A8/ +https://shakudada.xyz/2023/10/16/clickhouse-cloud-dbeaver%E8%BF%9E%E6%8E%A5%E4%B8%8D%E4%B8%8A/ +https://shakudada.xyz/2022/06/01/clickhouse-function/ +https://shakudada.xyz/2023/06/14/clickhouse-jdbc-1002-error/ +https://shakudada.xyz/2021/04/02/canal%E9%9C%80%E8%A6%81%E6%B3%A8%E6%84%8F%E7%9A%84%E7%82%B9/ +https://shakudada.xyz/2022/05/13/clickhosue-insert-insert-deduplicate/ +https://shakudada.xyz/2023/01/12/clickhouse-400-error/ +https://shakudada.xyz/2021/03/15/c-%E6%A0%87%E5%87%86%E5%BA%93%E7%9A%84vector/ +https://shakudada.xyz/2022/06/26/c-%E5%9F%BA%E7%A1%80/ +https://shakudada.xyz/2021/04/14/c99%E6%9F%94%E6%80%A7%E6%95%B0%E7%BB%84/ +https://shakudada.xyz/2023/10/25/bm25-and-search/ +https://shakudada.xyz/2020/11/18/btree/ +https://shakudada.xyz/2020/07/07/c-auto-cast/ +https://shakudada.xyz/2021/03/27/build-grpc/ +https://shakudada.xyz/2023/05/18/bigint-%E6%98%A0%E5%B0%84/ +https://shakudada.xyz/2022/04/19/bloom-filter/ +https://shakudada.xyz/2020/06/29/basic-paxos/ +https://shakudada.xyz/2023/04/12/arroyo-%E7%BC%96%E8%AF%91%E5%92%8C%E4%BD%BF%E7%94%A8/ +https://shakudada.xyz/2023/08/16/antlr-%E4%BD%BF%E7%94%A8/ +https://shakudada.xyz/2019/10/31/ast%E6%9E%84%E9%80%A0/ +https://shakudada.xyz/2023/08/16/WARNING-An-illegal-reflective-access-operation-has-occurred-groovy/ +https://shakudada.xyz/2023/06/05/WFST-%E5%92%8Clucene-%E5%92%8Cfst/ +https://shakudada.xyz/2020/04/08/a-language-to-machine-code/ +https://shakudada.xyz/2023/06/06/Payload-value-must-not-be-empty/ +https://shakudada.xyz/2021/01/13/RSA/ +https://shakudada.xyz/2023/07/16/System-arraycopy-in-java/ +https://shakudada.xyz/2023/07/03/Unable-to-make-protected-final-java-lang-Class-java-lang-ClassLoader-defineClass/ +https://shakudada.xyz/2023/08/25/Hydration-completed-but-contains-mismatches/ +https://shakudada.xyz/2023/06/07/Invalid-JSON-text-in-argument-2-in-mysql8/ +https://shakudada.xyz/2023/07/04/Numeric-overflow-in-expression-idea-java/ +https://shakudada.xyz/2023/01/30/Garbage-First-Garbage-Collection-%E7%AE%80%E5%8D%95%E6%A6%82%E5%86%B5/ +https://shakudada.xyz/2022/12/16/ConcurrentHashMap-npe/ +https://shakudada.xyz/2023/02/04/Double-Checked-Locking-is-Broken/ +https://shakudada.xyz/2021/04/13/20210413%E5%8F%8D%E6%80%9D%E8%BF%87%E5%8E%BB/ +https://shakudada.xyz/2022/08/26/3-Method-Reference-Expressions/ +https://shakudada.xyz/ +https://shakudada.xyz/tags/%E5%8F%8D%E6%80%9D/ +https://shakudada.xyz/tags/java/ +https://shakudada.xyz/tags/juc/ +https://shakudada.xyz/tags/js/ +https://shakudada.xyz/tags/mysql/ +https://shakudada.xyz/tags/rsa/ +https://shakudada.xyz/tags/groovy/ +https://shakudada.xyz/tags/lucene/ +https://shakudada.xyz/tags/compile/ +https://shakudada.xyz/tags/arroyo/ +https://shakudada.xyz/tags/hash/ +https://shakudada.xyz/tags/compiler/ +https://shakudada.xyz/tags/model/ +https://shakudada.xyz/tags/search/ +https://shakudada.xyz/tags/db/ +https://shakudada.xyz/tags/paxos/ +https://shakudada.xyz/tags/c/ +https://shakudada.xyz/tags/grpc/ +https://shakudada.xyz/tags/c/ +https://shakudada.xyz/tags/canal/ +https://shakudada.xyz/tags/clickhouse/ +https://shakudada.xyz/tags/jdbc/ 
+https://shakudada.xyz/tags/linux/
+https://shakudada.xyz/tags/php/
+https://shakudada.xyz/tags/http/
+https://shakudada.xyz/tags/cpp/
+https://shakudada.xyz/tags/function/
+https://shakudada.xyz/tags/bitmap/
+https://shakudada.xyz/tags/nlp/
+https://shakudada.xyz/tags/docker/
+https://shakudada.xyz/tags/jvm/
+https://shakudada.xyz/tags/bytecode/
+https://shakudada.xyz/tags/flink/
+https://shakudada.xyz/tags/elasticsearch/
+https://shakudada.xyz/tags/golang/
+https://shakudada.xyz/tags/es/
+https://shakudada.xyz/tags/nio/
+https://shakudada.xyz/tags/redisson/
+https://shakudada.xyz/tags/thread-pool/
+https://shakudada.xyz/tags/redis/
+https://shakudada.xyz/tags/lettuce/
+https://shakudada.xyz/tags/druid/
+https://shakudada.xyz/tags/string/
+https://shakudada.xyz/tags/algorithm/
+https://shakudada.xyz/tags/llvm/
+https://shakudada.xyz/tags/lsmtree/
+https://shakudada.xyz/tags/maven/
+https://shakudada.xyz/tags/k8s/
+https://shakudada.xyz/tags/memory/
+https://shakudada.xyz/tags/mvcc/
+https://shakudada.xyz/tags/io/
+https://shakudada.xyz/tags/nacos/
+https://shakudada.xyz/tags/paper/
+https://shakudada.xyz/tags/nginx/
+https://shakudada.xyz/tags/curl/
+https://shakudada.xyz/tags/rabbitmq/
+https://shakudada.xyz/tags/spring-boot/
+https://shakudada.xyz/tags/sql/
+https://shakudada.xyz/tags/raft/
+https://shakudada.xyz/tags/netty/
+https://shakudada.xyz/tags/redission/
+https://shakudada.xyz/tags/roaring-bitmap/
+https://shakudada.xyz/tags/springboot/
+https://shakudada.xyz/tags/shell/
+https://shakudada.xyz/tags/tensorflow/
+https://shakudada.xyz/tags/utf-8/
+https://shakudada.xyz/tags/tomcat/
+https://shakudada.xyz/tags/servelet/
+https://shakudada.xyz/tags/cmake/
+https://shakudada.xyz/tags/zookeeper/
+https://shakudada.xyz/tags/k3s/
+https://shakudada.xyz/tags/ml/
+https://shakudada.xyz/tags/regular/
diff --git a/sitemap.xml b/sitemap.xml
new file mode 100644
index 0000000000..055aa6e1f3
--- /dev/null
+++ b/sitemap.xml
@@ -0,0 +1,2830 @@
+  <url>
+    <loc>https://shakudada.xyz/2023/10/27/clickhouse-%E7%89%A9%E5%8C%96%E8%A7%86%E5%9B%BE/</loc>
+    <lastmod>2023-10-27</lastmod>
+    <changefreq>monthly</changefreq>
+    <priority>0.6</priority>
+  </url>
[The XML markup of sitemap.xml was stripped during extraction; the first <url> entry above is reconstructed from the recoverable values. The remaining ~330 entries repeat the same pattern for every URL listed in sitemap.txt: lastmod 2023-10-25 or 2023-10-27, with changefreq/priority of monthly/0.6 for posts, daily/1.0 for the home page, and weekly/0.2 for tag pages.]
diff --git a/tags/algorithm/index.html b/tags/algorithm/index.html
new file mode 100644
index 0000000000..effbffb1de
--- /dev/null
+++ b/tags/algorithm/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: algorithm | dinosaur" (标签 = "tag"; posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/arroyo/index.html b/tags/arroyo/index.html
new file mode 100644
index 0000000000..9664bc5571
--- /dev/null
+++ b/tags/arroyo/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: arroyo | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/bitmap/index.html b/tags/bitmap/index.html
new file mode 100644
index 0000000000..c39598d151
--- /dev/null
+++ b/tags/bitmap/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: bitmap | dinosaur" (posts grouped under 2022); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/bytecode/index.html b/tags/bytecode/index.html
new file mode 100644
index 0000000000..70aa761d85
--- /dev/null
+++ b/tags/bytecode/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: bytecode | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/c/index.html b/tags/c/index.html
new file mode 100644
index 0000000000..bb13f4c10b
--- /dev/null
+++ b/tags/c/index.html
@@ -0,0 +1,440 @@
[Generated tag-archive page "标签: c++ | dinosaur" (posts grouped under 2022 and 2021); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/canal/index.html b/tags/canal/index.html
new file mode 100644
index 0000000000..760ca89de2
--- /dev/null
+++ b/tags/canal/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: canal | dinosaur" (posts grouped under 2021); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/clickhouse/index.html b/tags/clickhouse/index.html
new file mode 100644
index 0000000000..9804070f64
--- /dev/null
+++ b/tags/clickhouse/index.html
@@ -0,0 +1,583 @@
[Generated tag-archive page "标签: clickhouse | dinosaur" (posts grouped under 2023 and 2022); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/clickhouse/page/2/index.html b/tags/clickhouse/page/2/index.html
new file mode 100644
index 0000000000..2d6ac89df1
--- /dev/null
+++ b/tags/clickhouse/page/2/index.html
@@ -0,0 +1,463 @@
[Generated tag-archive page "标签: clickhouse | dinosaur", page 2 (posts grouped under 2022 and 2019); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/cmake/index.html b/tags/cmake/index.html
new file mode 100644
index 0000000000..6ed2bf15e6
--- /dev/null
+++ b/tags/cmake/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: cmake | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/compile/index.html b/tags/compile/index.html
new file mode 100644
index 0000000000..35dc08bb04
--- /dev/null
+++ b/tags/compile/index.html
@@ -0,0 +1,420 @@
[Generated tag-archive page "标签: compile | dinosaur" (posts grouped under 2022 and 2020); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/compiler/index.html b/tags/compiler/index.html
new file mode 100644
index 0000000000..6a29f21a57
--- /dev/null
+++ b/tags/compiler/index.html
@@ -0,0 +1,417 @@
[Generated tag-archive page "标签: compiler | dinosaur" (posts grouped under 2019); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/cpp/index.html b/tags/cpp/index.html
new file mode 100644
index 0000000000..f06a138ed7
--- /dev/null
+++ b/tags/cpp/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: cpp | dinosaur" (posts grouped under 2022); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/curl/index.html b/tags/curl/index.html
new file mode 100644
index 0000000000..d13af98b72
--- /dev/null
+++ b/tags/curl/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: curl | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/db/index.html b/tags/db/index.html
new file mode 100644
index 0000000000..280b14aa8b
--- /dev/null
+++ b/tags/db/index.html
@@ -0,0 +1,440 @@
[Generated tag-archive page "标签: db | dinosaur" (posts grouped under 2023 and 2020); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/docker/index.html b/tags/docker/index.html
new file mode 100644
index 0000000000..a8d252f249
--- /dev/null
+++ b/tags/docker/index.html
@@ -0,0 +1,480 @@
[Generated tag-archive page "标签: docker | dinosaur" (posts grouped under 2021 and 2019); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/druid/index.html b/tags/druid/index.html
new file mode 100644
index 0000000000..64679bf121
--- /dev/null
+++ b/tags/druid/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: druid | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/elasticsearch/index.html b/tags/elasticsearch/index.html
new file mode 100644
index 0000000000..bca0a628e7
--- /dev/null
+++ b/tags/elasticsearch/index.html
@@ -0,0 +1,397 @@
[Generated tag-archive page "标签: elasticsearch | dinosaur" (posts grouped under 2023); template HTML was stripped during extraction, body omitted.]
diff --git a/tags/es/index.html b/tags/es/index.html
new file mode 100644
index 0000000000..110e31d0c0
--- /dev/null
+++ b/tags/es/index.html
@@ -0,0 +1,417 @@
[Generated tag-archive page "标签: es | dinosaur" (posts grouped under 2019); template HTML was stripped during extraction, body omitted.]
    +
    + +
    +
    + + + + + +
    + + + + + + + + +
    + +
    + +
    +
    + + +
    + + 0% +
    + + +
    +
    +
    + + +
    + + + + + +
    +
    +
    +

    es + 标签 +

    +
    + + +
    + 2019 +
    + + + + + +
    +
    + + + + + + + + +
    + + + + +
    + + + + + + + + +
    +
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/flink/index.html b/tags/flink/index.html new file mode 100644 index 0000000000..ab085f0026 --- /dev/null +++ b/tags/flink/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 标签: flink | dinosaur + + + + + + + + + + + + +
    +
    + +
    +
    + + + + + +
    + + + + + + + + +
    + +
    + +
    +
    + + +
    + + 0% +
    + + +
    +
    +
    + + +
    + + + + + +
    +
    +
    +

    flink + 标签 +

    +
    + + +
    + 2023 +
    + + + +
    +
    + + + + + + + + +
    + + + + +
    + + + + + + + + +
    +
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/function/index.html b/tags/function/index.html new file mode 100644 index 0000000000..2ee50f14fe --- /dev/null +++ b/tags/function/index.html @@ -0,0 +1,397 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 标签: function | dinosaur + + + + + + + + + + + + +
    +
    + +
    +
    + + + + + +
    + + + + + + + + +
    + +
    + +
    +
    + + +
    + + 0% +
    + + +
    +
    +
    + + +
    + + + + + +
    +
    +
    +

    function + 标签 +

    +
    + + +
    + 2020 +
    + + + +
    +
    + + + + + + + + +
    + + + + +
    + + + + + + + + +
    +
    + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tags/golang/index.html b/tags/golang/index.html new file mode 100644 index 0000000000..22060b8217 --- /dev/null +++ b/tags/golang/index.html @@ -0,0 +1,437 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 标签: golang | dinosaur + + + + + + + + + + + + +
    +
    + +
    +
    + + + + + +
    + + + + + + + + +
    + +
    + +
    +
    + + +
    + + 0% +
    + + +
    +
    +
    + + +
    + + + + + +
    +
    +
    +

    golang + 标签 +

    +
    + + +
    + 2019 +
    + + + + + + + +
    +
    + + + + + + + + +
    + + + + +
    + + + + + + + + +
    +
    + +
    + +
    +
diff --git a/tags/groovy/index.html b/tags/groovy/index.html
new file mode 100644
index 0000000000..f9976223c5
--- /dev/null
+++ b/tags/groovy/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "groovy", post list under 2023]
diff --git a/tags/grpc/index.html b/tags/grpc/index.html
new file mode 100644
index 0000000000..5d611e35c4
--- /dev/null
+++ b/tags/grpc/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "grpc", post list under 2021]
diff --git a/tags/hash/index.html b/tags/hash/index.html
new file mode 100644
index 0000000000..7631a0d4c9
--- /dev/null
+++ b/tags/hash/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "hash", post list under 2022]
diff --git a/tags/http/index.html b/tags/http/index.html
new file mode 100644
index 0000000000..6194e3949c
--- /dev/null
+++ b/tags/http/index.html
@@ -0,0 +1,417 @@
[417 lines of generated HTML omitted: tag archive "http", post list under 2019]
diff --git a/tags/index.html b/tags/index.html
new file mode 100644
index 0000000000..4643fa9e8d
--- /dev/null
+++ b/tags/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag overview page ("tags | dinosaur")]
diff --git a/tags/io/index.html b/tags/io/index.html
new file mode 100644
index 0000000000..07eae0299e
--- /dev/null
+++ b/tags/io/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "io", post list under 2019]
diff --git a/tags/java/index.html b/tags/java/index.html
new file mode 100644
index 0000000000..f8f377cf82
--- /dev/null
+++ b/tags/java/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 1), post list under 2023]
diff --git a/tags/java/page/2/index.html b/tags/java/page/2/index.html
new file mode 100644
index 0000000000..6932fcb13b
--- /dev/null
+++ b/tags/java/page/2/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 2), post list under 2023]
diff --git a/tags/java/page/3/index.html b/tags/java/page/3/index.html
new file mode 100644
index 0000000000..b32e9957d2
--- /dev/null
+++ b/tags/java/page/3/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 3), post list under 2023]
diff --git a/tags/java/page/4/index.html b/tags/java/page/4/index.html
new file mode 100644
index 0000000000..ecd3789a0e
--- /dev/null
+++ b/tags/java/page/4/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 4), post list under 2023]
diff --git a/tags/java/page/5/index.html b/tags/java/page/5/index.html
new file mode 100644
index 0000000000..5bad1f2f3f
--- /dev/null
+++ b/tags/java/page/5/index.html
@@ -0,0 +1,583 @@
[583 lines of generated HTML omitted: tag archive "java" (page 5), post list grouped under 2023 and 2022]
diff --git a/tags/java/page/6/index.html b/tags/java/page/6/index.html
new file mode 100644
index 0000000000..1074ad1c69
--- /dev/null
+++ b/tags/java/page/6/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 6), post list under 2022]
diff --git a/tags/java/page/7/index.html b/tags/java/page/7/index.html
new file mode 100644
index 0000000000..9c2bd228cb
--- /dev/null
+++ b/tags/java/page/7/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 7), post list under 2022]
diff --git a/tags/java/page/8/index.html b/tags/java/page/8/index.html
new file mode 100644
index 0000000000..ad1a6ba61e
--- /dev/null
+++ b/tags/java/page/8/index.html
@@ -0,0 +1,580 @@
[580 lines of generated HTML omitted: tag archive "java" (page 8), post list under 2022]
diff --git a/tags/java/page/9/index.html b/tags/java/page/9/index.html
new file mode 100644
index 0000000000..5aa16b072c
--- /dev/null
+++ b/tags/java/page/9/index.html
@@ -0,0 +1,563 @@
[563 lines of generated HTML omitted: tag archive "java" (page 9), post list grouped under 2022 and 2019]
diff --git a/tags/jdbc/index.html b/tags/jdbc/index.html
new file mode 100644
index 0000000000..7081f5fecb
--- /dev/null
+++ b/tags/jdbc/index.html
@@ -0,0 +1,417 @@
[417 lines of generated HTML omitted: tag archive "jdbc", post list under 2023]
diff --git a/tags/js/index.html b/tags/js/index.html
new file mode 100644
index 0000000000..479fffe92b
--- /dev/null
+++ b/tags/js/index.html
@@ -0,0 +1,443 @@
[443 lines of generated HTML omitted: tag archive "js", post list grouped under 2023, 2020 and 2019]
diff --git a/tags/juc/index.html b/tags/juc/index.html
new file mode 100644
index 0000000000..a9cf87316c
--- /dev/null
+++ b/tags/juc/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "juc", post list under 2022]
diff --git a/tags/jvm/index.html b/tags/jvm/index.html
new file mode 100644
index 0000000000..e36def894e
--- /dev/null
+++ b/tags/jvm/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "jvm", post list under 2023]
diff --git a/tags/k3s/index.html b/tags/k3s/index.html
new file mode 100644
index 0000000000..d296ab17fb
--- /dev/null
+++ b/tags/k3s/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "k3s", post list under 2023]
diff --git a/tags/k8s/index.html b/tags/k8s/index.html
new file mode 100644
index 0000000000..a6e853093c
--- /dev/null
+++ b/tags/k8s/index.html
@@ -0,0 +1,420 @@
[420 lines of generated HTML omitted: tag archive "k8s", post list grouped under 2023 and 2022]
diff --git a/tags/lettuce/index.html b/tags/lettuce/index.html
new file mode 100644
index 0000000000..ea84970f51
--- /dev/null
+++ b/tags/lettuce/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "lettuce", post list under 2022]
diff --git a/tags/linux/index.html b/tags/linux/index.html
new file mode 100644
index 0000000000..c0f8f2957a
--- /dev/null
+++ b/tags/linux/index.html
@@ -0,0 +1,440 @@
[440 lines of generated HTML omitted: tag archive "linux", post list grouped under 2021 and 2019]
diff --git a/tags/llvm/index.html b/tags/llvm/index.html
new file mode 100644
index 0000000000..8c56d1ffb7
--- /dev/null
+++ b/tags/llvm/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "llvm", post list under 2023]
diff --git a/tags/lsmtree/index.html b/tags/lsmtree/index.html
new file mode 100644
index 0000000000..a9c92d5277
--- /dev/null
+++ b/tags/lsmtree/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "lsmtree", post list under 2023]
diff --git a/tags/lucene/index.html b/tags/lucene/index.html
new file mode 100644
index 0000000000..0fb2a94e3c
--- /dev/null
+++ b/tags/lucene/index.html
@@ -0,0 +1,583 @@
[583 lines of generated HTML omitted: tag archive "lucene" (page 1), post list grouped under 2023 and 2022]
diff --git a/tags/lucene/page/2/index.html b/tags/lucene/page/2/index.html
new file mode 100644
index 0000000000..163a9172a4
--- /dev/null
+++ b/tags/lucene/page/2/index.html
@@ -0,0 +1,423 @@
[423 lines of generated HTML omitted: tag archive "lucene" (page 2), post list grouped under 2021 and 2019]
diff --git a/tags/maven/index.html b/tags/maven/index.html
new file mode 100644
index 0000000000..9c15269017
--- /dev/null
+++ b/tags/maven/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "maven", post list under 2022]
diff --git a/tags/memory/index.html b/tags/memory/index.html
new file mode 100644
index 0000000000..a956803efe
--- /dev/null
+++ b/tags/memory/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "memory", post list under 2022]
diff --git a/tags/ml/index.html b/tags/ml/index.html
new file mode 100644
index 0000000000..86d5c4ae7b
--- /dev/null
+++ b/tags/ml/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "ml", post list under 2020]
diff --git a/tags/model/index.html b/tags/model/index.html
new file mode 100644
index 0000000000..d19062c92e
--- /dev/null
+++ b/tags/model/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "model", post list under 2023]
diff --git a/tags/mvcc/index.html b/tags/mvcc/index.html
new file mode 100644
index 0000000000..e2dc118c1b
--- /dev/null
+++ b/tags/mvcc/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "mvcc", post list under 2021]
diff --git a/tags/mysql/index.html b/tags/mysql/index.html
new file mode 100644
index 0000000000..51048012cc
--- /dev/null
+++ b/tags/mysql/index.html
@@ -0,0 +1,586 @@
[586 lines of generated HTML omitted: tag archive "mysql" (page 1), post list grouped under 2023, 2021 and 2019]
diff --git a/tags/mysql/page/2/index.html b/tags/mysql/page/2/index.html
new file mode 100644
index 0000000000..ede845098c
--- /dev/null
+++ b/tags/mysql/page/2/index.html
@@ -0,0 +1,400 @@
[400 lines of generated HTML omitted: tag archive "mysql" (page 2), post list under 2019]
diff --git a/tags/nacos/index.html b/tags/nacos/index.html
new file mode 100644
index 0000000000..868330df30
--- /dev/null
+++ b/tags/nacos/index.html
@@ -0,0 +1,420 @@
[420 lines of generated HTML omitted: tag archive "nacos", post list grouped under 2023 and 2022]
diff --git a/tags/netty/index.html b/tags/netty/index.html
new file mode 100644
index 0000000000..ae47580510
--- /dev/null
+++ b/tags/netty/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "netty", post list under 2022]
diff --git a/tags/nginx/index.html b/tags/nginx/index.html
new file mode 100644
index 0000000000..090ee7cea8
--- /dev/null
+++ b/tags/nginx/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "nginx", post list under 2023]
diff --git a/tags/nio/index.html b/tags/nio/index.html
new file mode 100644
index 0000000000..4dd17d02c3
--- /dev/null
+++ b/tags/nio/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "nio", post list under 2023]
diff --git a/tags/nlp/index.html b/tags/nlp/index.html
new file mode 100644
index 0000000000..01e1a26258
--- /dev/null
+++ b/tags/nlp/index.html
@@ -0,0 +1,437 @@
[437 lines of generated HTML omitted: tag archive "nlp", post list under 2020]
diff --git a/tags/paper/index.html b/tags/paper/index.html
new file mode 100644
index 0000000000..029405a4c2
--- /dev/null
+++ b/tags/paper/index.html
@@ -0,0 +1,420 @@
[420 lines of generated HTML omitted: tag archive "paper", post list grouped under 2022 and 2021]
diff --git a/tags/paxos/index.html b/tags/paxos/index.html
new file mode 100644
index 0000000000..1d9b74e035
--- /dev/null
+++ b/tags/paxos/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "paxos", post list under 2020]
diff --git a/tags/php/index.html b/tags/php/index.html
new file mode 100644
index 0000000000..18469a5904
--- /dev/null
+++ b/tags/php/index.html
@@ -0,0 +1,586 @@
[586 lines of generated HTML omitted: tag archive "php" (page 1), post list grouped under 2021, 2020 and 2019]
diff --git a/tags/php/page/2/index.html b/tags/php/page/2/index.html
new file mode 100644
index 0000000000..a561f29b70
--- /dev/null
+++ b/tags/php/page/2/index.html
@@ -0,0 +1,400 @@
[400 lines of generated HTML omitted: tag archive "php" (page 2), post list under 2019]
diff --git a/tags/rabbitmq/index.html b/tags/rabbitmq/index.html
new file mode 100644
index 0000000000..b9dbd5c8f8
--- /dev/null
+++ b/tags/rabbitmq/index.html
@@ -0,0 +1,457 @@
[457 lines of generated HTML omitted: tag archive "rabbitmq", post list under 2021]
diff --git a/tags/raft/index.html b/tags/raft/index.html
new file mode 100644
index 0000000000..88d233b385
--- /dev/null
+++ b/tags/raft/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "raft", post list under 2020]
diff --git a/tags/redis/index.html b/tags/redis/index.html
new file mode 100644
index 0000000000..2b1d5f6c23
--- /dev/null
+++ b/tags/redis/index.html
@@ -0,0 +1,463 @@
[463 lines of generated HTML omitted: tag archive "redis", post list grouped under 2023, 2022 and 2021]
diff --git a/tags/redission/index.html b/tags/redission/index.html
new file mode 100644
index 0000000000..cd4af48643
--- /dev/null
+++ b/tags/redission/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "redission", post list under 2022]
diff --git a/tags/redisson/index.html b/tags/redisson/index.html
new file mode 100644
index 0000000000..cd1a5e967a
--- /dev/null
+++ b/tags/redisson/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "redisson", post list under 2022]
diff --git a/tags/regular/index.html b/tags/regular/index.html
new file mode 100644
index 0000000000..6042f47ee3
--- /dev/null
+++ b/tags/regular/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "regular", post list under 2019]
diff --git a/tags/roaring-bitmap/index.html b/tags/roaring-bitmap/index.html
new file mode 100644
index 0000000000..8f372f1894
--- /dev/null
+++ b/tags/roaring-bitmap/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "roaring bitmap", post list under 2021]
diff --git a/tags/rsa/index.html b/tags/rsa/index.html
new file mode 100644
index 0000000000..3c25e0f548
--- /dev/null
+++ b/tags/rsa/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "rsa", post list under 2021]
diff --git a/tags/search/index.html b/tags/search/index.html
new file mode 100644
index 0000000000..8f4c01783a
--- /dev/null
+++ b/tags/search/index.html
@@ -0,0 +1,420 @@
[420 lines of generated HTML omitted: tag archive "search", post list grouped under 2023 and 2022]
diff --git a/tags/servelet/index.html b/tags/servelet/index.html
new file mode 100644
index 0000000000..9a3756ccd9
--- /dev/null
+++ b/tags/servelet/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "servelet", post list under 2022]
diff --git a/tags/shell/index.html b/tags/shell/index.html
new file mode 100644
index 0000000000..52cf587a6e
--- /dev/null
+++ b/tags/shell/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "shell", post list under 2019]
diff --git a/tags/spring-boot/index.html b/tags/spring-boot/index.html
new file mode 100644
index 0000000000..e33793ebfd
--- /dev/null
+++ b/tags/spring-boot/index.html
@@ -0,0 +1,417 @@
[417 lines of generated HTML omitted: tag archive "spring-boot", post list under 2022]
diff --git a/tags/springboot/index.html b/tags/springboot/index.html
new file mode 100644
index 0000000000..16f8b3fda0
--- /dev/null
+++ b/tags/springboot/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "springboot", post list under 2022]
diff --git a/tags/sql/index.html b/tags/sql/index.html
new file mode 100644
index 0000000000..c4a447cb2b
--- /dev/null
+++ b/tags/sql/index.html
@@ -0,0 +1,460 @@
[460 lines of generated HTML omitted: tag archive "sql", post list grouped under 2021 and 2020]
diff --git a/tags/string/index.html b/tags/string/index.html
new file mode 100644
index 0000000000..1496fc21cf
--- /dev/null
+++ b/tags/string/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "string", post list under 2023]
diff --git a/tags/tensorflow/index.html b/tags/tensorflow/index.html
new file mode 100644
index 0000000000..065736d910
--- /dev/null
+++ b/tags/tensorflow/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "tensorflow", post list under 2023]
diff --git a/tags/thread-pool/index.html b/tags/thread-pool/index.html
new file mode 100644
index 0000000000..66f769fc54
--- /dev/null
+++ b/tags/thread-pool/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "thread pool", post list under 2022]
diff --git a/tags/tomcat/index.html b/tags/tomcat/index.html
new file mode 100644
index 0000000000..7da1bf5596
--- /dev/null
+++ b/tags/tomcat/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "tomcat", post list under 2022]
diff --git a/tags/utf-8/index.html b/tags/utf-8/index.html
new file mode 100644
index 0000000000..f7f26904a2
--- /dev/null
+++ b/tags/utf-8/index.html
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "utf-8", post list under 2023]
diff --git a/tags/zookeeper/index.html b/tags/zookeeper/index.html
new file mode 100644
index 0000000000..61b974e921
--- /dev/null
+++ b/tags/zookeeper/index.html
@@ -0,0 +1,417 @@
[417 lines of generated HTML omitted: tag archive "zookeeper", post list under 2022]
diff --git "a/tags/\345\217\215\346\200\235/index.html" "b/tags/\345\217\215\346\200\235/index.html"
new file mode 100644
index 0000000000..ee1f0eb043
--- /dev/null
+++ "b/tags/\345\217\215\346\200\235/index.html"
@@ -0,0 +1,397 @@
[397 lines of generated HTML omitted: tag archive "反思" (reflections), post list under 2021]