jaeswift-website/api/data/awesomelist/seriousran--awesome-qa.json

1 line
No EOL
13 KiB
JSON

{"slug": "seriousran--awesome-qa", "title": "Qa", "description": "\ud83d\ude0e A curated list of the Question Answering (QA)", "github_url": "https://github.com/seriousran/awesome-qa", "stars": "706", "tag": "Computer Science", "entry_count": 84, "subcategory_count": 15, "subcategories": [{"name": "General", "parent": "", "entries": [{"name": "Recent Trends", "url": "#recent-trends", "description": ""}, {"name": "About QA", "url": "#about-qa", "description": ""}, {"name": "Events", "url": "#events", "description": ""}, {"name": "Systems", "url": "#systems", "description": ""}, {"name": "Competitions in QA", "url": "#competitions-in-qa", "description": ""}, {"name": "Publications", "url": "#publications", "description": ""}, {"name": "Codes", "url": "#codes", "description": ""}, {"name": "Lectures", "url": "#lectures", "description": ""}, {"name": "Slides", "url": "#slides", "description": ""}, {"name": "Dataset Collections", "url": "#dataset-collections", "description": ""}, {"name": "Datasets", "url": "#datasets", "description": ""}, {"name": "Books", "url": "#books", "description": ""}, {"name": "Links", "url": "#links", "description": ""}]}, {"name": "Recent QA Models", "parent": "Recent Trends", "entries": []}, {"name": "Recent Language Models", "parent": "Recent Trends", "entries": [{"name": "ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators", "url": "https://openreview.net/pdf?id=r1xMH1BtvB", "description": ""}, {"name": "TinyBERT: Distilling BERT for Natural Language Understanding", "url": "https://openreview.net/pdf?id=rJx0Q6EFPB", "description": ""}, {"name": "MINILM: Deep Self-Attention Distillation for Task-Agnostic Compression of Pre-Trained Transformers", "url": "https://arxiv.org/abs/2002.10957", "description": ""}, {"name": "T5: Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer", "url": "https://arxiv.org/abs/1910.10683", "description": ""}, {"name": "ERNIE: Enhanced Language Representation 
with Informative Entities", "url": "https://arxiv.org/abs/1905.07129", "description": ""}, {"name": "XLNet: Generalized Autoregressive Pretraining for Language Understanding", "url": "https://arxiv.org/abs/1906.08237", "description": ""}, {"name": "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations", "url": "https://arxiv.org/abs/1909.11942", "description": ""}, {"name": "RoBERTa: A Robustly Optimized BERT Pretraining Approach", "url": "https://arxiv.org/abs/1907.11692", "description": ""}, {"name": "DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter", "url": "https://arxiv.org/pdf/1910.01108.pdf", "description": ""}, {"name": "SpanBERT: Improving Pre-training by Representing and Predicting Spans", "url": "https://arxiv.org/pdf/1907.10529v3.pdf", "description": ""}, {"name": "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding", "url": "https://arxiv.org/abs/1810.04805", "description": ""}]}, {"name": "AAAI 2020", "parent": "Recent Trends", "entries": [{"name": "TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection", "url": "https://arxiv.org/pdf/1911.04118.pdf", "description": ""}]}, {"name": "ACL 2019", "parent": "Recent Trends", "entries": [{"name": "Towards Scalable and Reliable Capsule Networks for Challenging NLP Applications", "url": "https://arxiv.org/pdf/1906.02829v1.pdf", "description": ""}, {"name": "Cognitive Graph for Multi-Hop Reading Comprehension at Scale", "url": "https://arxiv.org/pdf/1905.05460v2.pdf", "description": ""}, {"name": "Real-Time Open-Domain Question Answering with Dense-Sparse Phrase Index", "url": "https://arxiv.org/abs/1906.05807", "description": ""}, {"name": "Unsupervised Question Answering by Cloze Translation", "url": "https://arxiv.org/abs/1906.04980", "description": ""}, {"name": "SemEval-2019 Task 10: Math Question Answering", "url": "https://www.aclweb.org/anthology/S19-2153", "description": ""}, {"name": 
"Improving Question Answering over Incomplete KBs with Knowledge-Aware Reader", "url": "https://arxiv.org/abs/1905.07098", "description": ""}, {"name": "Matching Article Pairs with Graphical Decomposition and Convolutions", "url": "https://arxiv.org/pdf/1802.07459v2.pdf", "description": ""}, {"name": "Episodic Memory Reader: Learning what to Remember for Question Answering from Streaming Data", "url": "https://arxiv.org/abs/1903.06164", "description": ""}, {"name": "Natural Questions: a Benchmark for Question Answering Research", "url": "https://ai.google/research/pubs/pub47761", "description": ""}, {"name": "Textbook Question Answering with Multi-modal Context Graph Understanding and Self-supervised Open-set Comprehension", "url": "https://arxiv.org/abs/1811.00232", "description": ""}]}, {"name": "EMNLP-IJCNLP 2019", "parent": "Recent Trends", "entries": [{"name": "Language Models as Knowledge Bases?", "url": "https://arxiv.org/pdf/1909.01066v2.pdf", "description": ""}, {"name": "LXMERT: Learning Cross-Modality Encoder Representations from Transformers", "url": "https://arxiv.org/pdf/1908.07490v3.pdf", "description": ""}, {"name": "Answering Complex Open-domain Questions Through Iterative Query Generation", "url": "https://arxiv.org/pdf/1910.07000v1.pdf", "description": ""}, {"name": "KagNet: Knowledge-Aware Graph Networks for Commonsense Reasoning", "url": "https://arxiv.org/pdf/1909.02151v1.pdf", "description": ""}, {"name": "Mixture Content Selection for Diverse Sequence Generation", "url": "https://arxiv.org/pdf/1909.01953v1.pdf", "description": ""}, {"name": "A Discrete Hard EM Approach for Weakly Supervised Question Answering", "url": "https://arxiv.org/pdf/1909.04849v1.pdf", "description": ""}]}, {"name": "Arxiv", "parent": "Recent Trends", "entries": [{"name": "Investigating the Successes and Failures of BERT for Passage Re-Ranking", "url": "https://arxiv.org/abs/1905.01758", "description": ""}, {"name": "BERT with History Answer Embedding for 
Conversational Question Answering", "url": "https://arxiv.org/abs/1905.05412", "description": ""}, {"name": "Understanding the Behaviors of BERT in Ranking", "url": "https://arxiv.org/abs/1904.07531", "description": ""}, {"name": "BERT Post-Training for Review Reading Comprehension and Aspect-based Sentiment Analysis", "url": "https://arxiv.org/abs/1904.02232", "description": ""}, {"name": "End-to-End Open-Domain Question Answering with BERTserini", "url": "https://arxiv.org/abs/1902.01718", "description": ""}, {"name": "A BERT Baseline for the Natural Questions", "url": "https://arxiv.org/abs/1901.08634", "description": ""}, {"name": "Passage Re-ranking with BERT", "url": "https://arxiv.org/abs/1901.04085", "description": ""}, {"name": "SDNet: Contextualized Attention-based Deep Network for Conversational Question Answering", "url": "https://arxiv.org/abs/1812.03593", "description": ""}]}, {"name": "Dataset", "parent": "Recent Trends", "entries": [{"name": "ELI5: Long Form Question Answering", "url": "https://arxiv.org/abs/1907.09190", "description": ""}]}, {"name": "Types of QA", "parent": "About QA", "entries": []}, {"name": "Analysis and Parsing for Pre-processing in QA systems", "parent": "About QA", "entries": []}, {"name": "Most QA systems have roughly 3 parts", "parent": "About QA", "entries": [{"name": "IBM Watson", "url": "https://www.ibm.com/watson/", "description": "Has state-of-the-arts performance."}, {"name": "Facebook DrQA", "url": "https://research.fb.com/downloads/drqa/", "description": "Applied to the SQuAD1.0 dataset. The SQuAD2.0 dataset has released. 
but DrQA is not tested yet."}, {"name": "MIT media lab's Knowledge graph", "url": "http://conceptnet.io/", "description": "Is a freely-available semantic network, designed to help computers understand the meanings of words that people use."}, {"name": "BiDAF", "url": "https://github.com/allenai/bi-att-flow", "description": "Bi-Directional Attention Flow (BIDAF) network is a multi-stage hierarchical process that represents the context at different levels of granularity and uses bi-directional attention flow mechanism to obtain a query-aware context representation without early summarization.", "stars": "1.5k"}, {"name": "QANet", "url": "https://github.com/NLPLearn/QANet", "description": "A Q&A architecture does not require recurrent networks: Its encoder consists exclusively of convolution and self-attention, where convolution models local interactions and self-attention models global interactions.", "stars": "991"}, {"name": "R-Net", "url": "https://github.com/HKUST-KnowComp/R-Net", "description": "An end-to-end neural networks model for reading comprehension style question answering, which aims to answer questions from a given passage.", "stars": "586"}, {"name": "R-Net-in-Keras", "url": "https://github.com/YerevaNN/R-NET-in-Keras", "description": "R-NET re-implementation in Keras.", "stars": "181"}, {"name": "DrQA", "url": "https://github.com/hitvoice/DrQA", "description": "DrQA is a system for reading comprehension applied to open-domain question answering.", "stars": "393"}, {"name": "BERT", "url": "https://github.com/google-research/bert", "description": "A new language representation model which stands for Bidirectional Encoder Representations from Transformers. 
Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations by jointly conditioning on both left and right context in all layers.", "stars": "32k"}, {"name": "Question Answering - Natural Language Processing", "url": "https://youtu.be/Kzi6tE4JaGo", "description": "By Dragomir Radev, Ph.D. | University of Michigan | 2016."}, {"name": "Question Answering with Knowledge Bases, Web and Beyond", "url": "https://github.com/scottyih/Slides/blob/master/QA%20Tutorial.pdf", "description": "By Scott Wen-tau Yih & Hao Ma | Microsoft Research | 2016.", "stars": "33"}, {"name": "Question Answering", "url": "https://hpi.de/fileadmin/user_upload/fachgebiete/plattner/teaching/NaturalLanguageProcessing/NLP2017/NLP8_QuestionAnswering.pdf", "description": "By Dr. Mariana Neves | Hasso Plattner Institut | 2017."}, {"name": "NLIWOD's Question answering datasets", "url": "https://github.com/dice-group/NLIWOD/tree/master/qa.datasets", "description": "", "stars": "93"}, {"name": "karthikncode's Datasets for Natural Language Processing", "url": "https://github.com/karthikncode/nlp-datasets", "description": "", "stars": "920"}, {"name": "AI2 Science Questions v2.1(2017)", "url": "http://data.allenai.org/ai2-science-questions/", "description": ""}, {"name": "Children's Book Test", "url": "https://uclmr.github.io/ai4exams/data.html", "description": ""}, {"name": "CODAH Dataset", "url": "https://github.com/Websail-NU/CODAH", "description": "", "stars": "19"}, {"name": "DeepMind Q&A Dataset; CNN/Daily Mail", "url": "https://github.com/deepmind/rc-data", "description": "", "stars": "1.3k"}, {"name": "ELI5", "url": "https://github.com/facebookresearch/ELI5", "description": "", "stars": "261"}, {"name": "GraphQuestions", "url": "https://github.com/ysu1989/GraphQuestions", "description": "", "stars": "86"}, {"name": "LC-QuAD", "url": "http://sda.cs.uni-bonn.de/projects/qa-dataset/", "description": ""}, {"name": "MS MARCO", "url": 
"http://www.msmarco.org/dataset.aspx", "description": ""}, {"name": "MultiRC", "url": "https://cogcomp.org/multirc/", "description": ""}, {"name": "NarrativeQA", "url": "https://github.com/deepmind/narrativeqa", "description": "", "stars": "391"}, {"name": "NewsQA", "url": "https://github.com/Maluuba/newsqa", "description": "", "stars": "232"}, {"name": "Question-Answer Dataset by CMU", "url": "http://www.cs.cmu.edu/~ark/QA-data/", "description": ""}, {"name": "SQuAD1.0", "url": "https://rajpurkar.github.io/SQuAD-explorer/", "description": ""}, {"name": "SQuAD2.0", "url": "https://rajpurkar.github.io/SQuAD-explorer/", "description": ""}, {"name": "Story cloze test", "url": "http://cs.rochester.edu/nlp/rocstories/", "description": ""}, {"name": "TriviaQA", "url": "http://nlp.cs.washington.edu/triviaqa/", "description": ""}, {"name": "WikiQA", "url": "https://www.microsoft.com/en-us/download/details.aspx?id=52419&from=https%3A%2F%2Fresearch.microsoft.com%2Fen-US%2Fdownloads%2F4495da01-db8c-4041-a7f6-7984a4f6a905%2Fdefault.aspx", "description": ""}]}, {"name": "The DeepQA Research Team in IBM Watson's publication within 5 years", "parent": "Datasets", "entries": []}, {"name": "MS Research's publication within 5 years", "parent": "Datasets", "entries": []}, {"name": "Google AI's publication within 5 years", "parent": "Datasets", "entries": []}, {"name": "Facebook AI Research's publication within 5 years", "parent": "Datasets", "entries": [{"name": "Building a Question-Answering System from Scratch\u2014 Part 1", "url": "https://towardsdatascience.com/building-a-question-answering-system-part-1-9388aadff507", "description": ""}, {"name": "Question Answering with Tensorflow By Steven Hewitt, O'REILLY, 2017", "url": "https://www.oreilly.com/ideas/question-answering-with-tensorflow", "description": ""}, {"name": "Why question answering is hard", "url": "http://nicklothian.com/blog/2014/09/25/why-question-answering-is-hard/", "description": ""}]}], "name": ""}