diff --git a/.gitignore b/.gitignore index 4eec8110..98f93b82 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ .ipynb_checkpoints/ config.json __pycache__/ +.DS_Store diff --git a/data/citation.csv b/data/citation.csv index 7562e66d..4591fb7c 100644 --- a/data/citation.csv +++ b/data/citation.csv @@ -16,7 +16,6 @@ https://github.com/LMescheder/GAN_stability,"author = {Lars Mescheder and Sebast https://github.com/LMescheder/GAN_stability,"title = {Which Training Methods for GANs do actually Converge?}," https://github.com/LMescheder/GAN_stability,"booktitle = {International Conference on Machine Learning (ICML)}," https://github.com/LMescheder/GAN_stability,year = {2018} -https://github.com/LMescheder/GAN_stability,} https://github.com/NVIDIA/vid2vid,"If you find this useful for your research, please cite the following paper." https://github.com/NVIDIA/vid2vid, https://github.com/NVIDIA/vid2vid,"@inproceedings{wang2018vid2vid," @@ -25,7 +24,6 @@ https://github.com/NVIDIA/vid2vid,"and Andrew Tao and Jan Kautz and Bryan Catanz https://github.com/NVIDIA/vid2vid,"title = {Video-to-Video Synthesis}," https://github.com/NVIDIA/vid2vid,"booktitle = {Advances in Neural Information Processing Systems (NeurIPS)}," https://github.com/NVIDIA/vid2vid,"year = {2018}," -https://github.com/NVIDIA/vid2vid,} https://github.com/NVIDIA/vid2vid,Video-to-Video Synthesis https://github.com/NVIDIA/vid2vid,"Ting-Chun Wang1, Ming-Yu Liu1, Jun-Yan Zhu2, Guilin Liu1, Andrew Tao1, Jan Kautz1, Bryan Catanzaro1" https://github.com/NVIDIA/vid2vid,"1NVIDIA Corporation, 2MIT CSAIL" @@ -43,15 +41,12 @@ https://github.com/XiaLiPKU/RESCAN,"booktitle={European Conference on Computer V https://github.com/XiaLiPKU/RESCAN,"pages={262--277}," https://github.com/XiaLiPKU/RESCAN,"year={2018}," https://github.com/XiaLiPKU/RESCAN,organization={Springer} -https://github.com/XiaLiPKU/RESCAN,} https://github.com/ZhouYanzhao/PRM,Citation https://github.com/ZhouYanzhao/PRM,"If you find the code useful for your research, please cite:" https://github.com/ZhouYanzhao/PRM,"@INPROCEEDINGS{Zhou2018PRM," https://github.com/ZhouYanzhao/PRM,"author = {Zhou, Yanzhao and Zhu, Yi and Ye, Qixiang and Qiu, Qiang and Jiao, Jianbin}," https://github.com/ZhouYanzhao/PRM,"title = {Weakly Supervised Instance Segmentation using Class Peak Response}," https://github.com/ZhouYanzhao/PRM,"booktitle = {CVPR}," -https://github.com/ZhouYanzhao/PRM,year = {2018} -https://github.com/ZhouYanzhao/PRM,} https://github.com/akanazawa/hmr,"Angjoo Kanazawa, Michael J. Black, David W. Jacobs, Jitendra Malik CVPR 2018" https://github.com/akanazawa/hmr,"@inProceedings{kanazawaHMR18," https://github.com/akanazawa/hmr,"title={End-to-end Recovery of Human Shape and Pose}," @@ -60,16 +55,11 @@ https://github.com/akanazawa/hmr,and Michael J. Black https://github.com/akanazawa/hmr,and David W. Jacobs https://github.com/akanazawa/hmr,"and Jitendra Malik}," https://github.com/akanazawa/hmr,"booktitle={Computer Vision and Pattern Regognition (CVPR)}," -https://github.com/akanazawa/hmr,year={2018} -https://github.com/akanazawa/hmr,} -https://github.com/albertpumarola/GANimation,Citation https://github.com/albertpumarola/GANimation,"If you use this code or ideas from the paper for your research, please cite our paper:" https://github.com/albertpumarola/GANimation,"@inproceedings{pumarola2018ganimation," https://github.com/albertpumarola/GANimation,"title={GANimation: Anatomically-aware Facial Animation from a Single Image}," https://github.com/albertpumarola/GANimation,"author={A. 
Pumarola and A. Agudo and A.M. Martinez and A. Sanfeliu and F. Moreno-Noguer}," https://github.com/albertpumarola/GANimation,"booktitle={Proceedings of the European Conference on Computer Vision (ECCV)}," -https://github.com/albertpumarola/GANimation,year={2018} -https://github.com/albertpumarola/GANimation,} https://github.com/cgre-aachen/gempy,"For a more detailed elaboration of the theory behind GemPy, take a look at the upcoming scientific publication ""GemPy 1.0: open-source stochastic geological modeling and inversion"" by de la Varga et al. (2018)." https://github.com/cgre-aachen/gempy,References https://github.com/cgre-aachen/gempy,"de la Varga, M., Schaaf, A., and Wellmann, F.: GemPy 1.0: open-source stochastic geological modeling and inversion, Geosci. Model Dev., 12, 1-32, https://doi.org/10.5194/gmd-12-1-2019, 2019" @@ -77,16 +67,12 @@ https://github.com/cgre-aachen/gempy,"Calcagno, P., Chilès, J. P., Courrioux, G https://github.com/cgre-aachen/gempy,"Lajaunie, C., Courrioux, G., & Manuel, L. (1997). Foliation fields and 3D cartography in geology: principles of a method based on potential interpolation. Mathematical Geology, 29(4), 571-584." https://github.com/driftingtides/hyvr,"HyVR can be attributed by citing the following journal article: Bennett, J. P., Haslauer, C. P., Ross, M., & Cirpka, O. A. (2018). An open, object-based framework for generating anisotropy in sedimentary subsurface models. Groundwater. DOI: 10.1111/gwat.12803." https://github.com/driving-behavior/DBNet,"DBNet was developed by MVIG, Shanghai Jiao Tong University* and SCSC Lab, Xiamen University* (alphabetical order)." -https://github.com/driving-behavior/DBNet,Citation https://github.com/driving-behavior/DBNet,"If you find our work useful in your research, please consider citing:" https://github.com/driving-behavior/DBNet,"@InProceedings{DBNet2018," https://github.com/driving-behavior/DBNet,"author = {Yiping Chen and Jingkang Wang and Jonathan Li and Cewu Lu and Zhipeng Luo and HanXue and Cheng Wang}," https://github.com/driving-behavior/DBNet,"title = {LiDAR-Video Driving Dataset: Learning Driving Policies Effectively}," https://github.com/driving-behavior/DBNet,"booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," https://github.com/driving-behavior/DBNet,"month = {June}," -https://github.com/driving-behavior/DBNet,year = {2018} -https://github.com/driving-behavior/DBNet,} -https://github.com/empymod/empymod,Citation https://github.com/empymod/empymod,"If you publish results for which you used empymod, please give credit by citing Werthmüller (2017):" https://github.com/empymod/empymod,"Werthmüller, D., 2017, An open-source full 3D electromagnetic modeler for 1D VTI media in Python: empymod: Geophysics, 82(6), WB9--WB19; DOI: 10.1190/geo2016-0626.1." https://github.com/empymod/empymod,"All releases have a Zenodo-DOI, provided on the release-page. Also consider citing Hunziker et al. (2015) and Key (2012), without which empymod would not exist." 
@@ -95,7 +81,6 @@ https://github.com/endernewton/iter-reason,"author = {Xinlei Chen and Li-Jia Li https://github.com/endernewton/iter-reason,"title = {Iterative Visual Reasoning Beyond Convolutions}," https://github.com/endernewton/iter-reason,"booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}," https://github.com/endernewton/iter-reason,Year = {2018} -https://github.com/endernewton/iter-reason,} https://github.com/endernewton/iter-reason,"@inproceedings{chen2017spatial," https://github.com/endernewton/iter-reason,"author = {Xinlei Chen and Abhinav Gupta}," https://github.com/endernewton/iter-reason,"title = {Spatial Memory for Context Reasoning in Object Detection}," @@ -113,7 +98,6 @@ https://github.com/facebookresearch/Detectron/,"Piotr Doll\'{a}r and Kaiming He} https://github.com/facebookresearch/Detectron/,"title = {Detectron}," https://github.com/facebookresearch/Detectron/,"howpublished = {\url{https://github.com/facebookresearch/detectron}}," https://github.com/facebookresearch/Detectron/,year = {2018} -https://github.com/facebookresearch/Detectron/,} https://github.com/foolwood/DaSiamRPN,"Zheng Zhu*, Qiang Wang*, Bo Li*, Wei Wu, Junjie Yan, and Weiming Hu" https://github.com/foolwood/DaSiamRPN,"European Conference on Computer Vision (ECCV), 2018" https://github.com/foolwood/DaSiamRPN,Citing DaSiamRPN @@ -121,20 +105,13 @@ https://github.com/foolwood/DaSiamRPN,"If you find DaSiamRPN and SiamRPN useful https://github.com/foolwood/DaSiamRPN,"@inproceedings{Zhu_2018_ECCV," https://github.com/foolwood/DaSiamRPN,"title={Distractor-aware Siamese Networks for Visual Object Tracking}," https://github.com/foolwood/DaSiamRPN,"author={Zhu, Zheng and Wang, Qiang and Bo, Li and Wu, Wei and Yan, Junjie and Hu, Weiming}," -https://github.com/foolwood/DaSiamRPN,"booktitle={European Conference on Computer Vision}," -https://github.com/foolwood/DaSiamRPN,year={2018} -https://github.com/foolwood/DaSiamRPN,} https://github.com/foolwood/DaSiamRPN,"@InProceedings{Li_2018_CVPR," https://github.com/foolwood/DaSiamRPN,"title = {High Performance Visual Tracking With Siamese Region Proposal Network}," https://github.com/foolwood/DaSiamRPN,"author = {Li, Bo and Yan, Junjie and Wu, Wei and Zhu, Zheng and Hu, Xiaolin}," -https://github.com/foolwood/DaSiamRPN,"booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," -https://github.com/foolwood/DaSiamRPN,year = {2018} https://github.com/google/sg2im/,"@inproceedings{johnson2018image," https://github.com/google/sg2im/,"title={Image Generation from Scene Graphs}," https://github.com/google/sg2im/,"author={Johnson, Justin and Gupta, Agrim and Fei-Fei, Li}," https://github.com/google/sg2im/,"booktitle={CVPR}," -https://github.com/google/sg2im/,year={2018} -https://github.com/google/sg2im/,} https://github.com/google/sg2im/,Image Generation from Scene Graphs https://github.com/google/sg2im/,"Justin Johnson, Agrim Gupta, Li Fei-Fei" https://github.com/google/sg2im/,Presented at CVPR 2018 @@ -146,25 +123,16 @@ https://github.com/hezhangsprinter/DCPDN,[Paper Link] (CVPR'18) https://github.com/hezhangsprinter/DID-MDN,"@inproceedings{derain_zhang_2018," https://github.com/hezhangsprinter/DID-MDN,"title={Density-aware Single Image De-raining using a Multi-stream Dense Network}," https://github.com/hezhangsprinter/DID-MDN,"author={Zhang, He and Patel, Vishal M}," -https://github.com/hezhangsprinter/DID-MDN,"booktitle={CVPR}," -https://github.com/hezhangsprinter/DID-MDN,year={2018} 
-https://github.com/hezhangsprinter/DID-MDN,} -https://github.com/hiroharu-kato/neural_renderer,Citation https://github.com/hiroharu-kato/neural_renderer,@InProceedings{kato2018renderer https://github.com/hiroharu-kato/neural_renderer,"title={Neural 3D Mesh Renderer}," https://github.com/hiroharu-kato/neural_renderer,"author={Kato, Hiroharu and Ushiku, Yoshitaka and Harada, Tatsuya}," https://github.com/hiroharu-kato/neural_renderer,"booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," -https://github.com/hiroharu-kato/neural_renderer,year={2018} -https://github.com/hiroharu-kato/neural_renderer,} https://github.com/iannesbitt/readgssi,"Ian M. Nesbitt, François-Xavier Simon, Thomas Paulin, 2018. readgssi - an open-source tool to read and plot GSSI ground-penetrating radar data. doi:10.5281/zenodo.1439119" https://github.com/jiangsutx/SRN-Deblur,"Xin Tao, Hongyun Gao, Xiaoyong Shen, Jue Wang, Jiaya Jia." https://github.com/jiangsutx/SRN-Deblur,"@inproceedings{tao2018srndeblur," https://github.com/jiangsutx/SRN-Deblur,"title={Scale-recurrent Network for Deep Image Deblurring}," https://github.com/jiangsutx/SRN-Deblur,"author={Tao, Xin and Gao, Hongyun and Shen, Xiaoyong and Wang, Jue and Jia, Jiaya}," https://github.com/jiangsutx/SRN-Deblur,"booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," -https://github.com/jiangsutx/SRN-Deblur,year={2018} -https://github.com/jiangsutx/SRN-Deblur,} -https://github.com/joferkington/mplstereonet,References https://github.com/joferkington/mplstereonet,"[Kamb1956]Kamb, 1959. Ice Petrofabric Observations from Blue Glacier, Washington, in Relation to Theory and Experiment. Journal of Geophysical Research, Vol. 64, No. 11, pp. 1891--1909." https://github.com/joferkington/mplstereonet,"[Vollmer1995]Vollmer, 1995. C Program for Automatic Contouring of Spherical Orientation Data Using a Modified Kamb Method. Computers & Geosciences, Vol. 21, No. 1, pp. 31--49." https://github.com/kenshohara/3D-ResNets-PyTorch,"If you use this code or pre-trained models, please cite the following:" @@ -173,8 +141,6 @@ https://github.com/kenshohara/3D-ResNets-PyTorch,"author={Kensho Hara and Hiroka https://github.com/kenshohara/3D-ResNets-PyTorch,"title={Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?}," https://github.com/kenshohara/3D-ResNets-PyTorch,"booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," https://github.com/kenshohara/3D-ResNets-PyTorch,"pages={6546--6555}," -https://github.com/kenshohara/3D-ResNets-PyTorch,"year={2018}," -https://github.com/kenshohara/3D-ResNets-PyTorch,} https://github.com/kenshohara/3D-ResNets-PyTorch,"Kensho Hara, Hirokatsu Kataoka, and Yutaka Satoh," https://github.com/kenshohara/3D-ResNets-PyTorch,"Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet?""," https://github.com/kenshohara/3D-ResNets-PyTorch,"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6546-6555, 2018." 
@@ -184,8 +150,6 @@ https://github.com/msracver/Flow-Guided-Feature-Aggregation,"@inproceedings{zhu1 https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Author = {Xizhou Zhu, Yujie Wang, Jifeng Dai, Lu Yuan, Yichen Wei}," https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Title = {Flow-Guided Feature Aggregation for Video Object Detection}," https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Conference = {ICCV}," -https://github.com/msracver/Flow-Guided-Feature-Aggregation,Year = {2017} -https://github.com/msracver/Flow-Guided-Feature-Aggregation,} https://github.com/msracver/Flow-Guided-Feature-Aggregation,"@inproceedings{dai16rfcn," https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Author = {Jifeng Dai, Yi Li, Kaiming He, Jian Sun}," https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Title = {{R-FCN}: Object Detection via Region-based Fully Convolutional Networks}," @@ -201,13 +165,11 @@ https://github.com/phoenix104104/LapSRN,"author = {Lai, Wei-Sheng and Huang, https://github.com/phoenix104104/LapSRN,"title = {Deep Laplacian Pyramid Networks for Fast and Accurate Super-Resolution}," https://github.com/phoenix104104/LapSRN,"booktitle = {IEEE Conferene on Computer Vision and Pattern Recognition}," https://github.com/phoenix104104/LapSRN,year = {2017} -https://github.com/phoenix104104/LapSRN,} https://github.com/phuang17/DeepMVS,"@inproceedings{DeepMVS," https://github.com/phuang17/DeepMVS,"author = ""Huang, Po-Han and Matzen, Kevin and Kopf, Johannes and Ahuja, Narendra and Huang, Jia-Bin""," https://github.com/phuang17/DeepMVS,"title = ""DeepMVS: Learning Multi-View Stereopsis""," https://github.com/phuang17/DeepMVS,"booktitle = ""IEEE Conference on Computer Vision and Pattern Recognition (CVPR)""," https://github.com/phuang17/DeepMVS,"year = ""2018""" -https://github.com/phuang17/DeepMVS,} https://github.com/pyvista/pymeshfix,Algorithm and Citation Policy https://github.com/pyvista/pymeshfix,"To better understand how the algorithm works, please refer to the following paper:" https://github.com/pyvista/pymeshfix,"M. Attene. A lightweight approach to repairing digitized polygon meshes. The Visual Computer, 2010. (c) Springer. DOI: 10.1007/s00371-010-0416-3" @@ -229,32 +191,24 @@ https://github.com/pyvista/pyvista,"pages = {1450}," https://github.com/pyvista/pyvista,"author = {C. Bane Sullivan and Alexander Kaszynski}," https://github.com/pyvista/pyvista,"title = {{PyVista}: 3D plotting and mesh analysis through a streamlined interface for the Visualization Toolkit ({VTK})}," https://github.com/pyvista/pyvista,journal = {Journal of Open Source Software} -https://github.com/pyvista/pyvista,} https://github.com/rowanz/neural-motifs,Bibtex https://github.com/rowanz/neural-motifs,"@inproceedings{zellers2018scenegraphs," https://github.com/rowanz/neural-motifs,"title={Neural Motifs: Scene Graph Parsing with Global Context}," https://github.com/rowanz/neural-motifs,"author={Zellers, Rowan and Yatskar, Mark and Thomson, Sam and Choi, Yejin}," https://github.com/rowanz/neural-motifs,"booktitle = ""Conference on Computer Vision and Pattern Recognition""," -https://github.com/rowanz/neural-motifs,year={2018} -https://github.com/rowanz/neural-motifs,} https://github.com/ryersonvisionlab/two-stream-dyntex-synth,"@inproceedings{tesfaldet2018," https://github.com/ryersonvisionlab/two-stream-dyntex-synth,"author = {Matthew Tesfaldet and Marcus A. Brubaker and Konstantinos G. 
Derpanis}," https://github.com/ryersonvisionlab/two-stream-dyntex-synth,"title = {Two-Stream Convolutional Networks for Dynamic Texture Synthesis}," https://github.com/ryersonvisionlab/two-stream-dyntex-synth,"booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," -https://github.com/ryersonvisionlab/two-stream-dyntex-synth,year = {2018} -https://github.com/ryersonvisionlab/two-stream-dyntex-synth,} https://github.com/salihkaragoz/pose-residual-network-pytorch,"Muhammed Kocabas, Salih Karagoz, Emre Akbas. MultiPoseNet: Fast Multi-Person Pose Estimation using Pose Residual Network. In ECCV, 2018. arxiv" -https://github.com/salihkaragoz/pose-residual-network-pytorch,Citation https://github.com/salihkaragoz/pose-residual-network-pytorch,"If you find this code useful for your research, please consider citing our paper:" https://github.com/salihkaragoz/pose-residual-network-pytorch,"@Inproceedings{kocabas18prn," https://github.com/salihkaragoz/pose-residual-network-pytorch,"Title = {Multi{P}ose{N}et: Fast Multi-Person Pose Estimation using Pose Residual Network}," https://github.com/salihkaragoz/pose-residual-network-pytorch,"Author = {Kocabas, Muhammed and Karagoz, Salih and Akbas, Emre}," https://github.com/salihkaragoz/pose-residual-network-pytorch,"Booktitle = {European Conference on Computer Vision (ECCV)}," https://github.com/salihkaragoz/pose-residual-network-pytorch,Year = {2018} -https://github.com/salihkaragoz/pose-residual-network-pytorch,} https://github.com/whimian/pyGeoPressure,Cite pyGeoPressure as: https://github.com/whimian/pyGeoPressure,"Yu, (2018). PyGeoPressure: Geopressure Prediction in Python. Journal of Open Source Software, 3(30), 992, https://doi.org/10.21105/joss.00992" -https://github.com/whimian/pyGeoPressure,BibTex: https://github.com/whimian/pyGeoPressure,"@article{yu2018pygeopressure," https://github.com/whimian/pyGeoPressure,"title = {{PyGeoPressure}: {Geopressure} {Prediction} in {Python}}," https://github.com/whimian/pyGeoPressure,"author = {Yu, Hao}," @@ -264,23 +218,17 @@ https://github.com/whimian/pyGeoPressure,pages = {922} https://github.com/whimian/pyGeoPressure,"number = {30}," https://github.com/whimian/pyGeoPressure,"year = {2018}," https://github.com/whimian/pyGeoPressure,"doi = {10.21105/joss.00992}," -https://github.com/whimian/pyGeoPressure,} https://github.com/wuhuikai/DeepGuidedFilter,Fast End-to-End Trainable Guided Filter https://github.com/wuhuikai/DeepGuidedFilter,"Huikai Wu, Shuai Zheng, Junge Zhang, Kaiqi Huang" https://github.com/wuhuikai/DeepGuidedFilter,CVPR 2018 https://github.com/wuhuikai/DeepGuidedFilter,"@inproceedings{wu2017fast," https://github.com/wuhuikai/DeepGuidedFilter,"title = {Fast End-to-End Trainable Guided Filter}," https://github.com/wuhuikai/DeepGuidedFilter,"author = {Wu, Huikai and Zheng, Shuai and Zhang, Junge and Huang, Kaiqi}," -https://github.com/wuhuikai/DeepGuidedFilter,"booktitle = {CVPR}," -https://github.com/wuhuikai/DeepGuidedFilter,year = {2018} -https://github.com/wuhuikai/DeepGuidedFilter,} https://github.com/yuhuayc/da-faster-rcnn,"If you find it helpful for your research, please consider citing:" https://github.com/yuhuayc/da-faster-rcnn,"@inproceedings{chen2018domain," https://github.com/yuhuayc/da-faster-rcnn,"title={Domain Adaptive Faster R-CNN for Object Detection in the Wild}," https://github.com/yuhuayc/da-faster-rcnn,"author={Chen, Yuhua and Li, Wen and Sakaridis, Christos and Dai, Dengxin and Van Gool, Luc}," https://github.com/yuhuayc/da-faster-rcnn,"booktitle = 
{Computer Vision and Pattern Recognition (CVPR)}," -https://github.com/yuhuayc/da-faster-rcnn,year={2018} -https://github.com/yuhuayc/da-faster-rcnn,} https://github.com/yulunzhang/RDN,"Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu, ""Residual Dense Network for Image Super-Resolution"", CVPR 2018 (spotlight), [arXiv]" https://github.com/yulunzhang/RDN,"Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu, ""Residual Dense Network for Image Restoration"", arXiv 2018, [arXiv]" https://github.com/yulunzhang/RDN,"@InProceedings{Lim_2017_CVPR_Workshops," @@ -289,12 +237,9 @@ https://github.com/yulunzhang/RDN,"title = {Enhanced Deep Residual Networks for https://github.com/yulunzhang/RDN,"booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops}," https://github.com/yulunzhang/RDN,"month = {July}," https://github.com/yulunzhang/RDN,year = {2017} -https://github.com/yulunzhang/RDN,} https://github.com/yulunzhang/RDN,"@inproceedings{zhang2018residual," https://github.com/yulunzhang/RDN,"title={Residual Dense Network for Image Super-Resolution}," https://github.com/yulunzhang/RDN,"author={Zhang, Yulun and Tian, Yapeng and Kong, Yu and Zhong, Bineng and Fu, Yun}," -https://github.com/yulunzhang/RDN,"booktitle={CVPR}," -https://github.com/yulunzhang/RDN,year={2018} https://github.com/yulunzhang/RDN,"@article{zhang2018rdnir," https://github.com/yulunzhang/RDN,"title={Residual Dense Network for Image Restoration}," https://github.com/yulunzhang/RDN,"booktitle={arXiv}," @@ -302,9 +247,52 @@ https://github.com/zhiqiangdon/CU-Net,"@inproceedings{tang2018quantized," https://github.com/zhiqiangdon/CU-Net,"title={Quantized densely connected U-Nets for efficient landmark localization}," https://github.com/zhiqiangdon/CU-Net,"author={Tang, Zhiqiang and Peng, Xi and Geng, Shijie and Wu, Lingfei and Zhang, Shaoting and Metaxas, Dimitris}," https://github.com/zhiqiangdon/CU-Net,"booktitle={ECCV}," -https://github.com/zhiqiangdon/CU-Net,year={2018} -https://github.com/zhiqiangdon/CU-Net,} https://github.com/zhiqiangdon/CU-Net,"@inproceedings{tang2018cu," https://github.com/zhiqiangdon/CU-Net,"title={CU-Net: Coupled U-Nets}," https://github.com/zhiqiangdon/CU-Net,"author={Tang, Zhiqiang and Peng, Xi and Geng, Shijie and Zhu, Yizhe and Metaxas, Dimitris}," https://github.com/zhiqiangdon/CU-Net,"booktitle={BMVC}," +https://github.com/cltk/cltk,"Each major release of the CLTK is given a DOI, a type of unique identity for digital documents. This DOI ought to be included in your citation, as it will allow researchers to reproduce your results should the CLTK's API or codebase change. To find the CLTK's current DOI, observe the blue DOI button in the repository's home on GitHub. To the end of your bibliographic entry, append DOI plus the current identifier. You may also add version/release number, located in the pypi button at the project's GitHub repository homepage." +https://github.com/cltk/cltk,"Thus, please cite core software as something like:" +https://github.com/cltk/cltk,Kyle P. Johnson et al.. (2014-2019). CLTK: The Classical Language Toolkit. DOI 10.5281/zenodo.<current_release_id> +https://github.com/cltk/cltk,A style-neutral BibTeX entry would look like this: +https://github.com/cltk/cltk,"@Misc{johnson2014," +https://github.com/cltk/cltk,"author = {Kyle P. 
Johnson et al.}," +https://github.com/cltk/cltk,"title = {CLTK: The Classical Language Toolkit}," +https://github.com/cltk/cltk,"howpublished = {\url{https://github.com/cltk/cltk}}," +https://github.com/cltk/cltk,"note = {{DOI} 10.5281/zenodo.<current_release_id>}," +https://github.com/cltk/cltk,"year = {2014--2019}," +https://github.com/facebookresearch/DensePose,"Citing DensePose" +https://github.com/facebookresearch/DensePose,"If you use Densepose, please use the following BibTeX entry." +https://github.com/facebookresearch/DensePose,"@InProceedings{Guler2018DensePose," +https://github.com/facebookresearch/DensePose," title={DensePose: Dense Human Pose Estimation In The Wild}," +https://github.com/facebookresearch/DensePose," author={R\{i}za Alp G\""uler, Natalia Neverova, Iasonas Kokkinos}," +https://github.com/facebookresearch/DensePose," journal={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}," +https://github.com/facebookresearch/DensePose, year={2018} +https://github.com/facebookresearch/DensePose, } +https://github.com/facebookresearch/ResNeXt,"If you use ResNeXt in your research, please cite the paper:" +https://github.com/facebookresearch/ResNeXt,"@article{Xie2016," +https://github.com/facebookresearch/ResNeXt," title={Aggregated Residual Transformations for Deep Neural Networks}," +https://github.com/facebookresearch/ResNeXt," author={Saining Xie and Ross Girshick and Piotr Dollár and Zhuowen Tu and Kaiming He}," +https://github.com/facebookresearch/ResNeXt," journal={arXiv preprint arXiv:1611.05431}," +https://github.com/facebookresearch/ResNeXt, year={2016} +https://github.com/harismuneer/Ultimate-Facebook-Scraper,"If you use this tool for your research, then kindly cite it. Click the above badge for more information regarding the complete citation for this tool and diffferent citation formats like IEEE, APA etc." +https://github.com/microsoft/malmo,Citations +https://github.com/microsoft/malmo,Please cite Malmo as: +https://github.com/microsoft/malmo,"Johnson M., Hofmann K., Hutton T., Bignell D. (2016) The Malmo Platform for Artificial Intelligence Experimentation. Proc. 25th International Joint Conference on Artificial Intelligence, Ed. Kambhampati S., p. 4246. AAAI Press, Palo Alto, California USA. https://github.com/Microsoft/malmo" +https://github.com/nextflow-io/nextflow,"If you use Nextflow in your research, please cite:" +https://github.com/nextflow-io/nextflow,"P. Di Tommaso, et al. Nextflow enables reproducible computational workflows. Nature Biotechnology 35, 316–319 (2017) doi:10.1038/nbt.3820" +https://github.com/pyro-ppl/pyro,"If you use Pyro, please consider citing:" +https://github.com/pyro-ppl/pyro,"@article{bingham2018pyro," +https://github.com/pyro-ppl/pyro," author = {Bingham, Eli and Chen, Jonathan P. and Jankowiak, Martin and Obermeyer, Fritz and" +https://github.com/pyro-ppl/pyro," Pradhan, Neeraj and Karaletsos, Theofanis and Singh, Rohit and Szerlip, Paul and" +https://github.com/pyro-ppl/pyro," Horsfall, Paul and Goodman, Noah D.}," +https://github.com/pyro-ppl/pyro," title = {{Pyro: Deep Universal Probabilistic Programming}}," +https://github.com/pyro-ppl/pyro," journal = {arXiv preprint arXiv:1810.09538}," +https://github.com/pyro-ppl/pyro, year = {2018} +https://github.com/scikit-image/scikit-image,"If you find this project useful, please cite:" +https://github.com/scikit-image/scikit-image,"Stéfan van der Walt, Johannes L. 
Schönberger, Juan Nunez-Iglesias," +https://github.com/scikit-image/scikit-image,"François Boulogne, Joshua D. Warner, Neil Yager, Emmanuelle" +https://github.com/scikit-image/scikit-image,"Gouillart, Tony Yu, and the scikit-image contributors." +https://github.com/scikit-image/scikit-image,scikit-image: Image processing in Python. PeerJ 2:e453 (2014) +https://github.com/scikit-image/scikit-image,https://doi.org/10.7717/peerj.453 +https://github.com/scikit-learn/scikit-learn,"If you use scikit-learn in a scientific publication, we would appreciate citations: http://scikit-learn.org/stable/about.html#citing-scikit-learn" diff --git a/data/description.csv b/data/description.csv index 67f97713..4f4c1411 100644 --- a/data/description.csv +++ b/data/description.csv @@ -57,13 +57,11 @@ https://github.com/driftingtides/hyvr,HyVR: Turning your geofantasy into reality https://github.com/driftingtides/hyvr,"The Hydrogeological Virtual Reality simulation package (HyVR) is a Python module that helps researchers and practitioners generate subsurface models with multiple scales of heterogeneity that are based on geological concepts. The simulation outputs can then be used to explore groundwater flow and solute transport behaviour. This is facilitated by HyVR outputs in common flow simulation packages' input formats. As each site is unique, HyVR has been designed that users can take the code and extend it to suit their particular simulation needs." https://github.com/driftingtides/hyvr,"The original motivation for HyVR was the lack of tools for modelling sedimentary deposits that include bedding structure model outputs (i.e., dip and azimuth). Such bedding parameters were required to approximate full hydraulic-conductivity tensors for groundwater flow modelling. HyVR is able to simulate these bedding parameters and generate spatially distributed parameter fields, including full hydraulic-conductivity tensors. More information about HyVR is available in the online technical documentation." https://github.com/driftingtides/hyvr,I hope you enjoy using HyVR much more than I enjoyed putting it together! I look forward to seeing what kind of funky fields you created in the course of your work. -https://github.com/driving-behavior/DBNet,Introduction https://github.com/driving-behavior/DBNet,"This work is based on our research paper, which appears in CVPR 2018. We propose a large-scale dataset for driving behavior learning, namely, DBNet. You can also check our dataset webpage for a deeper introduction." https://github.com/driving-behavior/DBNet,"In this repository, we release demo code and partial prepared data for training with only images, as well as leveraging feature maps or point clouds. The prepared data are accessible here. (More demo models and scripts are released soon!)" https://github.com/driving-behavior/DBNet,"This baseline is run on dbnet-2018 challenge data and only nvidia_pn is tested. To measure difficult architectures comprehensively, several metrics are set, including accuracy under different thresholds, area under curve (AUC), max error (ME), mean error (AE) and mean of max errors (AME)." https://github.com/driving-behavior/DBNet,The implementations of these metrics could be found in evaluate.py. 
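The DBNet rows above name accuracy under error thresholds, max error (ME), and mean error (AE). A minimal NumPy sketch of such metrics, under assumed definitions — the repository's evaluate.py is the authoritative implementation:

    import numpy as np

    def driving_metrics(pred, gt, thresholds=(1.0, 5.0, 10.0)):
        # Toy stand-ins for the DBNet-style metrics; exact definitions live in evaluate.py.
        err = np.abs(np.asarray(pred, dtype=float) - np.asarray(gt, dtype=float))
        acc = {t: float((err <= t).mean()) for t in thresholds}  # accuracy under thresholds
        return {"accuracy": acc, "ME": float(err.max()), "AE": float(err.mean())}

    print(driving_metrics([10.2, 9.8, 30.0], [10.0, 10.0, 10.0]))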
https://github.com/empymod/empymod,"The electromagnetic modeller empymod can model electric or magnetic responses due to a three-dimensional electric or magnetic source in a layered-earth model with vertical transverse isotropic (VTI) resistivity, VTI electric permittivity, and VTI magnetic permeability, from very low frequencies (DC) to very high frequencies (GPR). The calculation is carried out in the wavenumber-frequency domain, and various Hankel- and Fourier-transform methods are included to transform the responses into the space-frequency and space-time domains." -https://github.com/empymod/empymod,Features https://github.com/empymod/empymod,"Calculates the complete (diffusion and wave phenomena) 3D electromagnetic field in a layered-earth model including vertical transverse isotropic (VTI) resistivity, VTI electric permittivity, and VTI magnetic permeability, for electric and magnetic sources as well as electric and magnetic receivers." https://github.com/empymod/empymod,Modelling routines: https://github.com/empymod/empymod,"bipole: arbitrary oriented, finite length bipoles with given source strength; space-frequency and space-time domains." @@ -128,7 +126,6 @@ https://github.com/foolwood/DaSiamRPN,"DaSiamRPN improves the performances of Si https://github.com/geo-data/gdal-docker,This is an Ubuntu derived image containing the Geospatial Data Abstraction Library (GDAL) compiled with a broad range of drivers. The build process is based on that defined in the GDAL TravisCI tests. https://github.com/geo-data/gdal-docker,Each branch in the git repository corresponds to a supported GDAL version (e.g. 1.11.2) with the master branch following GDAL master. These branch names are reflected in the image tags on the Docker Index (e.g. branch 1.11.2 corresponds to the image geodata/gdal:1.11.2). https://github.com/geopandas/geopandas/,Python tools for geographic data -https://github.com/geopandas/geopandas/,Introduction https://github.com/geopandas/geopandas/,GeoPandas is a project to add support for geographic data to pandas objects. It currently implements GeoSeries and GeoDataFrame types which are subclasses of pandas.Series and pandas.DataFrame respectively. GeoPandas objects can act on shapely geometry objects and perform geometric operations. https://github.com/geopandas/geopandas/,"GeoPandas geometry operations are cartesian. The coordinate reference system (crs) can be stored as an attribute on an object, and is automatically set when loading from a file. Objects may be transformed to new coordinate systems with the to_crs() method. There is currently no enforcement of like coordinates for operations, but that may change in the future." https://github.com/google/sg2im/,This is the code for the paper @@ -199,7 +196,6 @@ https://github.com/mbloch/mapshaper,"Mapshaper is software for editing Shapefile https://github.com/mbloch/mapshaper,"The mapshaper command line program supports essential map making tasks like simplifying shapes, editing attribute data, clipping, erasing, dissolving, filtering and more." https://github.com/mbloch/mapshaper,"The web UI supports interactive simplification, attribute data editing, and running cli commands in a built-in console. Visit the public website at www.mapshaper.org or use the web UI locally via the mapshaper-gui script." https://github.com/msracver/Flow-Guided-Feature-Aggregation,"This repository is implemented by Yuqing Zhu, Shuhao Fu, and Xizhou Zhu, when they are interns at MSRA." 
-https://github.com/msracver/Flow-Guided-Feature-Aggregation,Introduction https://github.com/msracver/Flow-Guided-Feature-Aggregation,"Flow-Guided Feature Aggregation (FGFA) is initially described in an ICCV 2017 paper. It provides an accurate and end-to-end learning framework for video object detection. The proposed FGFA method, together with our previous work of Deep Feature Flow, powered the winning entry of ImageNet VID 2017. It is worth noting that:" https://github.com/msracver/Flow-Guided-Feature-Aggregation,"FGFA improves the per-frame features by aggregating nearby frame features along the motion paths. It significantly improves the object detection accuracy in videos, especially for fast moving objects." https://github.com/msracver/Flow-Guided-Feature-Aggregation,"FGFA is end-to-end trainable for the task of video object detection, which is vital for improving the recognition accuracy." @@ -262,7 +258,6 @@ https://github.com/ungarj/tilematrix,Tilematrix supports metatiling and tile buf https://github.com/ungarj/tilematrix,"It is very similar to mercantile but besides of supporting spherical mercator tile pyramids, it also supports geodetic (WGS84) tile pyramids." https://github.com/vuejs/vue,"Vue (pronounced /vjuː/, like view) is a progressive framework for building user interfaces. It is designed from the ground up to be incrementally adoptable, and can easily scale between a library and a framework depending on different use cases. It consists of an approachable core library that focuses on the view layer only, and an ecosystem of supporting libraries that helps you tackle complexity in large Single-Page Applications." https://github.com/whimian/pyGeoPressure,A Python package for pore pressure prediction using well log data and seismic velocity data. -https://github.com/whimian/pyGeoPressure,Features https://github.com/whimian/pyGeoPressure,Overburden (or Lithostatic) Pressure Calculation https://github.com/whimian/pyGeoPressure,Eaton's method and Parameter Optimization https://github.com/whimian/pyGeoPressure,Bowers' method and Parameter Optimization @@ -279,3 +274,64 @@ https://github.com/yulunzhang/RDN,"A very deep convolutional neural network (CNN https://github.com/zhiqiangdon/CU-Net,"The follwoing figure gives an illustration of naive dense U-Net, stacked U-Nets and coupled U-Nets (CU-Net). The naive dense U-Net and stacked U-Nets have shortcut connections only inside each U-Net. In contrast, the coupled U-Nets also have connections for semantic blocks across U-Nets. The CU-Net is a hybrid of naive dense U-Net and stacked U-Net, integrating the merits of both dense connectivity, intermediate supervisions and multi-stage top-down and bottom-up refinement. The resulted CU-Net could save ~70% parameters of the previous stacked U-Nets but with comparable accuracy." https://github.com/zhiqiangdon/CU-Net,"If we couple each U-Net pair in multiple U-Nets, the coupling connections would have quadratic growth with respect to the U-Net number. To make the model more parameter efficient, we propose the order-K coupling to trim off the long-distance coupling connections." https://github.com/zhiqiangdon/CU-Net,"For simplicity, each dot represents one U-Net. The red and blue lines are the shortcut connections of inside semantic blocks and outside inputs. Order-0 connectivity (Top) strings U-Nets together only by their inputs and outputs, i.e. stacked U-Nets. Order-1 connectivity (Middle) has shortcut connections for adjacent U-Nets. 
Similarly, order-2 connectivity (Bottom) has shortcut connections for 3 nearby U-Nets." +https://github.com/cltk/cltk,The Classical Language Toolkit +https://github.com/cltk/cltk,"The Classical Language Toolkit (CLTK) offers natural language processing (NLP) support for the languages of Ancient, Classical, and Medieval Eurasia. Greek, Latin, Akkadian, and the Germanic languages are currently most complete. The goals of the CLTK are to:" +https://github.com/cltk/cltk,* compile analysis-friendly corpora; +https://github.com/cltk/cltk,* collect and generate linguistic data; +https://github.com/cltk/cltk,* act as a free and open platform for generating scientific research. +https://github.com/facebookresearch/DensePose,Dense Human Pose Estimation In The Wild +https://github.com/facebookresearch/DensePose,"Rıza Alp Güler, Natalia Neverova, Iasonas Kokkinos" +https://github.com/facebookresearch/DensePose,Dense human pose estimation aims at mapping all human pixels of an RGB image to the 3D surface of the human body. +https://github.com/facebookresearch/DensePose,DensePose-RCNN is implemented in the Detectron framework and is powered by Caffe2. +https://github.com/facebookresearch/DensePose,"In this repository, we provide the code to train and evaluate DensePose-RCNN. We also provide notebooks to visualize the collected DensePose-COCO dataset and show the correspondences to the SMPL model." +https://github.com/facebookresearch/ResNeXt,ResNeXt: Aggregated Residual Transformations for Deep Neural Networks +https://github.com/facebookresearch/ResNeXt,ResNeXt is the foundation of their new SENet architecture (a ResNeXt-152 (64 x 4d) with the Squeeze-and-Excitation module)! +https://github.com/facebookresearch/ResNeXt,This repository contains a Torch implementation for the ResNeXt algorithm for image classification. The code is based on fb.resnet.torch. +https://github.com/facebookresearch/ResNeXt,"ResNeXt is a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call “cardinality” (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width." +https://github.com/facebookresearch/pyrobot,"PyRobot is a light weight, high-level interface which provides hardware independent APIs for robotic manipulation and navigation. This repository also contains the low-level stack for LoCoBot, a low cost mobile manipulator hardware platform." +https://github.com/gitbucket/gitbucket,GitBucket is a Git web platform powered by Scala offering: +https://github.com/gitbucket/gitbucket,"You can try an online demo (ID: root / Pass: root) of GitBucket, and also get the latest information at GitBucket News." +https://github.com/harismuneer/Ultimate-Facebook-Scraper,🔥 Ultimate Facebook Scrapper +https://github.com/harismuneer/Ultimate-Facebook-Scraper,A bot which scrapes almost everything about a facebook user's profile including +https://github.com/harismuneer/Ultimate-Facebook-Scraper,"The best thing about this scraper is that the data is scraped in an organized format so that it can be used for educational/research purpose by researchers. Moreover, this scraper does not use Facebook's Graph API so there are no rate limiting issues as such. 
" +https://github.com/nextflow-io/nextflow,Nextflow is a bioinformatics workflow manager that enables the development of portable and reproducible workflows. +https://github.com/nextflow-io/nextflow,"It supports deploying workflows on a variety of execution platforms including local, HPC schedulers, AWS Batch," +https://github.com/nextflow-io/nextflow,"Google Genomics Pipelines, and Kubernetes. Additionally, it provides support for manage your workflow dependencies" +https://github.com/nextflow-io/nextflow,"through built-in support for Conda, Docker, Singularity, and Modules." +https://github.com/nextflow-io/nextflow,"Nextflow framework is based on the dataflow programming model, which greatly simplifies writing parallel and distributed pipelines without adding unnecessary complexity and letting you concentrate on the flow of data, i.e. the functional logic of the application/algorithm." +https://github.com/nextflow-io/nextflow,"It doesn't aim to be another pipeline scripting language yet, but it is built around the idea that the Linux platform is the lingua franca of data science, since it provides many simple command line and scripting tools, which by themselves are powerful, but when chained together facilitate complex data manipulations." +https://github.com/nextflow-io/nextflow,"In practice, this means that a Nextflow script is defined by composing many different processes. Each process can execute a given bioinformatics tool or scripting language, to which is added the ability to coordinate and synchronize the processes execution by simply specifying their inputs and outputs." +https://github.com/nextflow-io/nextflow,"Nextflow also supports running workflows across various clouds and cloud technologies. Nextflow can create AWS EC2 or Google GCE clusters and deploy your workflow. Managed solutions from both Amazon and Google are also supported through AWS Batch and Google Genomics Pipelines. Additionally, Nextflow can run workflows on either on-prem or managed cloud Kubernetes clusters. " +https://github.com/nextflow-io/nextflow,"Nextflow is built on two great pieces of open source software, namely Groovy" +https://github.com/pyro-ppl/pyro,"Pyro is a flexible, scalable deep probabilistic programming library built on PyTorch. Notably, it was designed with these principles in mind:" +https://github.com/pyro-ppl/pyro,Universal: Pyro is a universal PPL - it can represent any computable probability distribution. +https://github.com/pyro-ppl/pyro,Scalable: Pyro scales to large data sets with little overhead compared to hand-written code. +https://github.com/pyro-ppl/pyro,"Minimal: Pyro is agile and maintainable. It is implemented with a small core of powerful, composable abstractions." +https://github.com/pyro-ppl/pyro,"Flexible: Pyro aims for automation when you want it, control when you need it. This is accomplished through high-level abstractions to express generative and inference models, while allowing experts easy-access to customize inference." +https://github.com/pyro-ppl/pyro,Pyro is in a beta release. It is developed and maintained by Uber AI Labs and community contributors. +https://github.com/reduxjs/react-redux,Official React bindings for Redux. +https://github.com/reduxjs/react-redux,Performant and flexible. 
+https://github.com/scikit-image/scikit-image,scikit-image: Image processing in Python +https://github.com/scikit-learn/scikit-learn,scikit-learn is a Python module for machine learning built on top of +https://github.com/scikit-learn/scikit-learn,SciPy and is distributed under the 3-Clause BSD license. +https://github.com/scikit-learn/scikit-learn,The project was started in 2007 by David Cournapeau as a Google Summer +https://github.com/scikit-learn/scikit-learn,"of Code project, and since then many volunteers have contributed. See" +https://github.com/scikit-learn/scikit-learn,the About us <http://scikit-learn.org/dev/about.html#authors>_ page +https://github.com/scikit-learn/scikit-learn,for a list of core contributors. +https://github.com/scikit-learn/scikit-learn,It is currently maintained by a team of volunteers. +https://github.com/scikit-learn/scikit-learn,Website: http://scikit-learn.org +https://github.com/tensorflow/magenta,Magenta is a research project exploring the role of machine learning +https://github.com/tensorflow/magenta,in the process of creating art and music. Primarily this +https://github.com/tensorflow/magenta,involves developing new deep learning and reinforcement learning +https://github.com/tensorflow/magenta,"algorithms for generating songs, images, drawings, and other materials. But it's also" +https://github.com/tensorflow/magenta,an exploration in building smart tools and interfaces that allow +https://github.com/tensorflow/magenta,artists and musicians to extend (not replace!) their processes using +https://github.com/tensorflow/magenta,these models. Magenta was started by some researchers and engineers +https://github.com/tensorflow/magenta,"from the Google Brain team," +https://github.com/tensorflow/magenta,but many others have contributed significantly to the project. We use +https://github.com/tensorflow/magenta,TensorFlow and release our models and +https://github.com/tensorflow/magenta,tools in open source on this GitHub. If you’d like to learn more +https://github.com/tensorflow/magenta,"about Magenta, check out our blog," +https://github.com/tensorflow/magenta,where we post technical details. You can also join our discussion +https://github.com/tensorflow/magenta,group. +https://github.com/tensorflow/magenta,"This is the home for our Python TensorFlow library. To use our models in the browser with TensorFlow.js, head to the Magenta.js repository." diff --git a/data/installation.csv b/data/installation.csv index 0a25da2a..01d54273 100644 --- a/data/installation.csv +++ b/data/installation.csv @@ -12,14 +12,12 @@ https://github.com/JimmySuen/integral-human-pose,Python Version: 3.6 https://github.com/JimmySuen/integral-human-pose,OS: CentOs7 (Other Linux system is also OK) https://github.com/JimmySuen/integral-human-pose,CUDA: 9.0 (least 8.0) https://github.com/JimmySuen/integral-human-pose,PyTorch:0.4.0(see issue https://github.com/JimmySuen/integral-human-pose/issues/4) -https://github.com/JimmySuen/integral-human-pose,Installation https://github.com/JimmySuen/integral-human-pose,"We recommend installing python from Anaconda, installing pytorch following guide on PyTorch according to your specific CUDA & python version. In addition, you need to install dependencies below." 
https://github.com/JimmySuen/integral-human-pose,pip install scipy https://github.com/JimmySuen/integral-human-pose,pip install matplotlib https://github.com/JimmySuen/integral-human-pose,pip install opencv-python https://github.com/JimmySuen/integral-human-pose,pip install easydict https://github.com/JimmySuen/integral-human-pose,pip install pyyaml -https://github.com/JuliaGeo/LibGEOS.jl,Installation https://github.com/JuliaGeo/LibGEOS.jl,"At the Julia prompt, run" https://github.com/JuliaGeo/LibGEOS.jl,"julia> Pkg.add(""LibGEOS"")" https://github.com/JuliaGeo/LibGEOS.jl,"This will install both the Julia package and GEOS shared libraries together. To just reinstall the GEOS shared libraries, run Pkg.build(""LibGEOS"")." @@ -48,7 +46,6 @@ https://github.com/NVIDIA/vid2vid,Linux or macOS https://github.com/NVIDIA/vid2vid,Python 3 https://github.com/NVIDIA/vid2vid,NVIDIA GPU + CUDA cuDNN https://github.com/NVIDIA/vid2vid,PyTorch 0.4 -https://github.com/NVIDIA/vid2vid,Installation https://github.com/NVIDIA/vid2vid,Install python libraries dominate and requests. https://github.com/NVIDIA/vid2vid,pip install dominate requests https://github.com/NVIDIA/vid2vid,"If you plan to train with face datasets, please install dlib." @@ -60,11 +57,9 @@ https://github.com/NVIDIA/vid2vid,cd vid2vid https://github.com/NVIDIA/vid2vid,"Docker Image If you have difficulty building the repo, a docker image can be found in the docker folder." https://github.com/OpenGeoVis/PVGeo,"To begin using the PVGeo Python package, create/activate your Python virtual environment (we highly recommend using anaconda) and install PVGeo through pip:" https://github.com/OpenGeoVis/PVGeo,pip install PVGeo -https://github.com/OpenGeoVis/omfvista,Installation https://github.com/OpenGeoVis/omfvista,Installation is simply: https://github.com/OpenGeoVis/omfvista,pip install omfvista https://github.com/OpenGeoVis/omfvista,All necessary dependencies will be installed alongside omfvista. Please note that this package heavily leverages the PyVista package. -https://github.com/OpenGeoscience/geonotebook/,Installation https://github.com/OpenGeoscience/geonotebook/,System Prerequisites https://github.com/OpenGeoscience/geonotebook/,For default tile serving https://github.com/OpenGeoscience/geonotebook/,GDAL >= 2.1.0 @@ -78,7 +73,6 @@ https://github.com/OpenGeoscience/geonotebook/,mkvirtualenv -a . geonotebook https://github.com/OpenGeoscience/geonotebook/,# Numpy must be fully installed before rasterio https://github.com/OpenGeoscience/geonotebook/,pip install -r prerequirements.txt https://github.com/OpenGeoscience/geonotebook/,pip install -r requirements.txt -https://github.com/OpenGeoscience/geonotebook/,pip install . https://github.com/OpenGeoscience/geonotebook/,# Enable both the notebook and server extensions https://github.com/OpenGeoscience/geonotebook/,jupyter serverextension enable --sys-prefix --py geonotebook https://github.com/OpenGeoscience/geonotebook/,jupyter nbextension enable --sys-prefix --py geonotebook @@ -93,7 +87,6 @@ https://github.com/OpenGeoscience/geonotebook/,# Enable the extension https://github.com/OpenGeoscience/geonotebook/,# Start the javascript builder https://github.com/OpenGeoscience/geonotebook/,cd js https://github.com/OpenGeoscience/geonotebook/,npm run watch -https://github.com/Toblerity/Fiona/,Installation https://github.com/Toblerity/Fiona/,"Fiona requires Python 2.7 or 3.4+ and GDAL/OGR 1.8+. 
To build from a source distribution you will need a C compiler and GDAL and Python development headers and libraries (libgdal1-dev for Debian/Ubuntu, gdal-dev for CentOS/Fedora)." https://github.com/Toblerity/Fiona/,"To build from a repository copy, you will also need Cython to build C sources from the project's .pyx files. See the project's requirements-dev.txt file for guidance." https://github.com/Toblerity/Fiona/,"The Kyngchaos GDAL frameworks will satisfy the GDAL/OGR dependency for OS X, as will Homebrew's GDAL Formula (brew install gdal)." @@ -141,7 +134,6 @@ https://github.com/XiaLiPKU/RESCAN,Python>=3.6 https://github.com/XiaLiPKU/RESCAN,Pytorch>=4.1.0 https://github.com/XiaLiPKU/RESCAN,Opencv>=3.1.0 https://github.com/XiaLiPKU/RESCAN,tensorboardX -https://github.com/ZhouYanzhao/PRM,Prerequisites https://github.com/ZhouYanzhao/PRM,System (tested on Ubuntu 14.04LTS and Win10) https://github.com/ZhouYanzhao/PRM,NVIDIA GPU + CUDA CuDNN (CPU mode is also supported but significantly slower) https://github.com/ZhouYanzhao/PRM,Python>=3.5 @@ -174,28 +166,22 @@ https://github.com/agile-geoscience/striplog/,"Then do this to create an environ https://github.com/agile-geoscience/striplog/,conda create -n myenv python=3.5 numpy matplotlib https://github.com/agile-geoscience/striplog/,source activate myenv https://github.com/agile-geoscience/striplog/,Then you can do: -https://github.com/akanazawa/hmr,Requirements https://github.com/akanazawa/hmr,Python 2.7 https://github.com/akanazawa/hmr,"TensorFlow tested on version 1.3, demo alone runs with TF 1.12" -https://github.com/akanazawa/hmr,Installation https://github.com/akanazawa/hmr,Setup virtualenv https://github.com/akanazawa/hmr,virtualenv venv_hmr https://github.com/akanazawa/hmr,source venv_hmr/bin/activate https://github.com/akanazawa/hmr,pip install -U pip https://github.com/akanazawa/hmr,deactivate -https://github.com/akanazawa/hmr,pip install -r requirements.txt https://github.com/akanazawa/hmr,Install TensorFlow https://github.com/akanazawa/hmr,With GPU: https://github.com/akanazawa/hmr,pip install tensorflow-gpu==1.3.0 https://github.com/akanazawa/hmr,Without GPU: https://github.com/akanazawa/hmr,pip install tensorflow==1.3.0 -https://github.com/akaszynski/pyansys,Installation https://github.com/akaszynski/pyansys,Installation through pip: https://github.com/akaszynski/pyansys,pip install pyansys -https://github.com/albertpumarola/GANimation,Prerequisites https://github.com/albertpumarola/GANimation,"Install PyTorch (version 0.3.1), Torch Vision and dependencies from http://pytorch.org" https://github.com/albertpumarola/GANimation,Install requirements.txt (pip install -r requirements.txt) -https://github.com/cgre-aachen/gempy,Installation https://github.com/cgre-aachen/gempy,We provide the latest release version of GemPy via the Conda and PyPi package services. We highly recommend using either PyPi as it will take care of automatically installing all dependencies. https://github.com/cgre-aachen/gempy,PyPi https://github.com/cgre-aachen/gempy,$ pip install gempy @@ -229,7 +215,6 @@ https://github.com/d3/d3,"You can also use the standalone D3 microlibraries. For https://github.com/d3/d3,"" https://github.com/driftingtides/hyvr,Installing the HyVR package https://github.com/driftingtides/hyvr,Installing Python -https://github.com/driftingtides/hyvr,Windows https://github.com/driftingtides/hyvr,"If you are using Windows, we recommend installing the Anaconda distribution of Python 3. 
This distribution has the majority of dependencies that HyVR requires." https://github.com/driftingtides/hyvr,It is also a good idea to install the HyVR package into a virtual environment. Do this by opening a command prompt window and typing the following: https://github.com/driftingtides/hyvr,conda create --name hyvr_env @@ -246,10 +231,8 @@ https://github.com/driftingtides/hyvr,"The version on PyPI should always be up t https://github.com/driftingtides/hyvr,git clone https://github.com/driftingtides/hyvr.git https://github.com/driftingtides/hyvr,To install from source you need a C compiler. https://github.com/driftingtides/hyvr,Installation from conda-forge will (hopefully) be coming soon. -https://github.com/driftingtides/hyvr,Requirements https://github.com/driftingtides/hyvr,Python https://github.com/driftingtides/hyvr,"HyVR was developed for use with Python 3.4 or greater. It may be possible to use with earlier versions of Python 3, however this has not been tested." -https://github.com/driftingtides/hyvr,Dependencies https://github.com/driftingtides/hyvr,numpy <= 1.13.3 https://github.com/driftingtides/hyvr,matplotlib <= 2.1.0 https://github.com/driftingtides/hyvr,scipy = 1.0.0 @@ -257,19 +240,15 @@ https://github.com/driftingtides/hyvr,pandas = 0.21.0 https://github.com/driftingtides/hyvr,flopy == 3.2.9 (optional for modflow output) https://github.com/driftingtides/hyvr,pyevtk = 1.1.0 (optional for VTK output) https://github.com/driftingtides/hyvr,h5py (optional for HDF5 output) -https://github.com/driving-behavior/DBNet,Requirements https://github.com/driving-behavior/DBNet,Tensorflow 1.2.0 -https://github.com/driving-behavior/DBNet,Python 2.7 https://github.com/driving-behavior/DBNet,CUDA 8.0+ (For GPU) https://github.com/driving-behavior/DBNet,"Python Libraries: numpy, scipy and laspy" https://github.com/driving-behavior/DBNet,"The code has been tested with Python 2.7, Tensorflow 1.2.0, CUDA 8.0 and cuDNN 5.1 on Ubuntu 14.04. But it may work on more machines (directly or through mini-modification), pull-requests or test report are well welcomed." -https://github.com/empymod/empymod,Installation https://github.com/empymod/empymod,You can install empymod either via conda: https://github.com/empymod/empymod,conda install -c prisae empymod https://github.com/empymod/empymod,or via pip: https://github.com/empymod/empymod,pip install empymod https://github.com/empymod/empymod,Required are Python version 3.5 or higher and the modules NumPy and SciPy. Consult the installation notes in the manual for more information regarding installation and requirements. -https://github.com/endernewton/iter-reason,Prerequisites https://github.com/endernewton/iter-reason,"Tensorflow, tested with version 1.6 with Ubuntu 16.04, installed with:" https://github.com/endernewton/iter-reason,pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.6.0-cp27-none-linux_x86_64.whl https://github.com/endernewton/iter-reason,Other packages needed can be installed with pip: @@ -358,18 +337,15 @@ https://github.com/equinor/segyio,git clone https://github.com/equinor/segyio https://github.com/equinor/segyio,mkdir segyio/build https://github.com/equinor/segyio,cd segyio/build https://github.com/equinor/segyio,cmake .. 
-DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=ON -https://github.com/equinor/segyio,make https://github.com/equinor/segyio,make install https://github.com/equinor/segyio,"make install must be done as root for a system install; if you want to install in your home directory, add -DCMAKE_INSTALL_PREFIX=~/ or some other appropriate directory, or make DESTDIR=~/ install. Please ensure your environment picks up on non-standard install locations (PYTHONPATH, LD_LIBRARY_PATH and PATH)." https://github.com/equinor/segyio,"If you have multiple Python installations, or want to use some alternative interpreter, you can help cmake find the right one by passing -DPYTHON_EXECUTABLE=/opt/python/binary along with install prefix and build type." https://github.com/equinor/segyio,"To build the matlab bindings, invoke CMake with the option -DBUILD_MEX=ON. In some environments the Matlab binaries are in a non-standard location, in which case you need to help CMake find the matlab binaries by passing -DMATLAB_ROOT=/path/to/matlab." -https://github.com/facebook/react,Installation https://github.com/facebook/react,"React has been designed for gradual adoption from the start, and you can use as little or as much React as you need:" https://github.com/facebook/react,Use Online Playgrounds to get a taste of React. https://github.com/facebook/react,Add React to a Website as a