My colleagues and I have published our work in various top-tier journals and conferences. Please visit my Google Scholar page for the complete list of publications.

Journal

Improving Aspect-Level Sentiment Analysis with Aspect Extraction
Navonil Majumder, Rishabh Bhardwaj, Soujanya Poria, Alexander Gelbukh, and Amir Hussain
Neural Computing and Applications (Accepted), 2020 pdf
@ARTICLE{94jer0i9, author={N. {Majumder} and R. {Bhardwaj} and S. {Poria} and A. {Gelbukh} and A. {Hussain}}, journal={Neural Computing and Applications}, title={{Improving Aspect-Level Sentiment Analysis with Aspect Extraction}}, year={2020},}
Emotion Recognition in Conversation: Research Challenges, Datasets, and Recent Advances
Soujanya Poria, Navonil Majumder, Rada Mihalcea, and Eduard Hovy
IEEE Access, 2019 pdf
@ARTICLE{8764449, author={S. {Poria} and N. {Majumder} and R. {Mihalcea} and E. {Hovy}}, journal={IEEE Access}, title={Emotion Recognition in Conversation: Research Challenges, Datasets, and Recent Advances}, year={2019}, pages={1-1}, keywords={Emotion recognition;Task analysis;Context modeling;Taxonomy;Natural language processing;Licenses;Pragmatics}, doi={10.1109/ACCESS.2019.2929050}, ISSN={2169-3536}, ipf={4.098}}
Sentiment and Sarcasm Classification with Multitask Learning
Navonil Majumder, Soujanya Poria, Haiyun Peng, Niyati Chhaya, Erik Cambria, and Alexander Gelbukh
IEEE Intelligent Systems, 2019 pdf
@ARTICLE{8766192, author={N. {Majumder} and S. {Poria} and H. {Peng} and N. {Chhaya} and E. {Cambria} and A. {Gelbukh}}, journal={IEEE Intelligent Systems}, title={Sentiment and Sarcasm Classification With Multitask Learning}, year={2019}, volume={34}, number={3}, pages={38-43}, keywords={learning (artificial intelligence);neural nets;pattern classification;sentiment analysis;deep neural network;separate tasks;intensive emotion;important natural language processing tasks;sarcasm classification;multitask learning setting;multitask learning-based framework;sentiment classification;sarcasm detection;Task analysis;Neural networks;Computer architecture;Feature extraction;Affective computing;Sentiment analysis;Intelligent systems;Classification}, doi={10.1109/MIS.2019.2904691}, ISSN={1941-1294}, month={May},}
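As the keywords hint, the framework shares an utterance representation between the sentiment and sarcasm tasks. A minimal PyTorch sketch of such a multitask setup, assuming a GRU encoder and illustrative layer sizes (not the paper's exact architecture):

import torch
import torch.nn as nn

class MultitaskClassifier(nn.Module):
    # Shared utterance encoder with separate heads for sentiment and sarcasm.
    def __init__(self, emb_dim=300, hid_dim=128, n_sentiment=3):
        super().__init__()
        self.encoder = nn.GRU(emb_dim, hid_dim, batch_first=True)
        self.sentiment_head = nn.Linear(hid_dim, n_sentiment)
        self.sarcasm_head = nn.Linear(hid_dim, 2)  # sarcastic vs. not

    def forward(self, x):              # x: (batch, seq_len, emb_dim)
        _, h = self.encoder(x)         # h: (1, batch, hid_dim)
        h = h.squeeze(0)
        return self.sentiment_head(h), self.sarcasm_head(h)

model = MultitaskClassifier()
sent_logits, sarc_logits = model(torch.randn(4, 20, 300))
# Train by summing the two cross-entropy losses, so the shared
# encoder learns features useful to both tasks.
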
Multimodal Sentiment Analysis: Addressing Key Issues and Setting Up the Baselines
Soujanya Poria, Navonil Majumder, Devamanyu Hazarika, Erik Cambria, Alexander Gelbukh, and Amir Hussain
IEEE Intelligent Systems, 2018 pdf
@ARTICLE{8636432, author={S. Poria and N. Majumder and D. Hazarika and E. Cambria and A. Gelbukh and A. Hussain}, journal={IEEE Intelligent Systems}, title={Multimodal Sentiment Analysis: Addressing Key Issues and Setting Up the Baselines}, year={2018}, volume={33}, number={6}, pages={17-25}, keywords={Sentiment analysis;Feature extraction;Visualization;Emotion recognition;Affective computing;Social networking (online);Intelligent systems}, doi={10.1109/MIS.2018.2882362}, ISSN={1541-1672}, ipf={2.596}, month={Nov}}
Multimodal Sentiment Analysis using Hierarchical Fusion with Context Modeling
Navonil Majumder, Devamanyu Hazarika, Alexander Gelbukh, Erik Cambria, and Soujanya Poria
Knowledge-Based Systems, 2018 pdf code
@article{MAJUMDER2018124, title = "Multimodal Sentiment Analysis using Hierarchical Fusion with Context Modeling", journal = "Knowledge-Based Systems", volume = "161", pages = "124--133", year = "2018", issn = "0950-7051", doi = "10.1016/j.knosys.2018.07.041", url = "http://www.sciencedirect.com/science/article/pii/S0950705118303897", author = "N. Majumder and D. Hazarika and A. Gelbukh and E. Cambria and S. Poria", keywords = "Multimodal fusion, Sentiment analysis", ipf = "4.396" }
Deep Learning-Based Document Modeling for Personality Detection from Text
Navonil Majumder, Soujanya Poria, Alexander Gelbukh, and Erik Cambria
IEEE Intelligent Systems, 2017 pdf code
@ARTICLE{7887639, author={N. {Majumder} and S. {Poria} and A. {Gelbukh} and E. {Cambria}}, journal={IEEE Intelligent Systems}, title={Deep Learning-Based Document Modeling for Personality Detection from Text}, year={2017}, volume={32}, number={2}, pages={74-79}, keywords={feedforward neural nets;information filtering;learning (artificial intelligence);pattern classification;text analysis;emotionally neutral input sentence filtering;document vector;document-level Mairesse features;deep convolutional neural network;identical architecture;binary classifier training;author psychological profile;Big Five traits;author personality type;deep learning based method;text;personality detection;deep learning-based document modeling;Feature extraction;Semantics;Pragmatics;Computational modeling;Neural networks;Emotion recognition;Artificial intelligence;personality;natural language processing;distributional semantics;neural-based document modeling;convolutional neural network;intelligent systems;artificial intelligence}, doi={10.1109/MIS.2017.23}, ISSN={1941-1294}, month={Mar},}
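The keywords outline the pipeline: a convolutional network builds a document vector, document-level Mairesse features are concatenated in, and a separate binary classifier predicts each Big Five trait. A simplified sketch under those assumptions (single filter size; the Mairesse feature dimension is illustrative):

import torch
import torch.nn as nn

class PersonalityCNN(nn.Module):
    # CNN document encoder + one binary classifier per Big Five trait.
    def __init__(self, emb_dim=300, n_filters=100, mairesse_dim=84):
        super().__init__()
        self.conv = nn.Conv1d(emb_dim, n_filters, kernel_size=3, padding=1)
        self.heads = nn.ModuleList(
            [nn.Linear(n_filters + mairesse_dim, 2) for _ in range(5)])

    def forward(self, words, mairesse):
        # words: (batch, seq_len, emb_dim); mairesse: (batch, mairesse_dim)
        h = torch.relu(self.conv(words.transpose(1, 2)))
        doc = h.max(dim=2).values                  # max-pool over time
        doc = torch.cat([doc, mairesse], dim=1)
        return [head(doc) for head in self.heads]  # five binary logit pairs

model = PersonalityCNN()
logits = model(torch.randn(2, 50, 300), torch.randn(2, 84))
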

Conference

KinGDOM: Knowledge-Guided DOMain adaptation for sentiment analysis
Deepanway Ghosal, Devamanyu Hazarika, Abhinaba Roy, Navonil Majumder, Rada Mihalcea, and Soujanya Poria
ACL (Accepted), 2020 pdf code
@inproceedings{kingdom-acl, title = "{KinGDOM}: Knowledge-Guided DOMain adaptation for sentiment analysis", author = "Ghosal, Deepanway and Hazarika, Devamanyu and Roy, Abhinaba and Majumder, Navonil and Mihalcea, Rada and Poria, Soujanya", booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", year = "2020", publisher = "Association for Computational Linguistics", }
DialogueGCN: A Graph Convolutional Neural Network for Emotion Recognition in Conversation
Deepanway Ghosal, Navonil Majumder, Soujanya Poria, Niyati Chhaya, and Alexander Gelbukh
EMNLP, 2019 pdf code
@inproceedings{ghosal-etal-2019-dialoguegcn, title = "{D}ialogue{GCN}: A Graph Convolutional Neural Network for Emotion Recognition in Conversation", author = "Ghosal, Deepanway and Majumder, Navonil and Poria, Soujanya and Chhaya, Niyati and Gelbukh, Alexander", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", month = nov, year = "2019", address = "Hong Kong, China", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/D19-1015", doi = "10.18653/v1/D19-1015", pages = "154--164", abstract = "Emotion recognition in conversation (ERC) has received much attention, lately, from researchers due to its potential widespread applications in diverse areas, such as health-care, education, and human resources. In this paper, we present Dialogue Graph Convolutional Network (DialogueGCN), a graph neural network based approach to ERC. We leverage self and inter-speaker dependency of the interlocutors to model conversational context for emotion recognition. Through the graph network, DialogueGCN addresses context propagation issues present in the current RNN-based methods. We empirically show that this method alleviates such issues, while outperforming the current state of the art on a number of benchmark emotion classification datasets.", }
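To give a flavor of the graph construction described in the abstract, here is a hedged sketch assuming PyTorch Geometric's RGCNConv: utterances are nodes, edges link utterances within a context window, and the edge relation distinguishes same-speaker from inter-speaker dependencies (a simplification of the paper's full relation scheme):

import torch
from torch_geometric.nn import RGCNConv

def build_edges(speakers, window=2):
    # Link each utterance to neighbors within the window;
    # relation 0 = same speaker, relation 1 = different speaker.
    src, dst, rel = [], [], []
    for i in range(len(speakers)):
        lo, hi = max(0, i - window), min(len(speakers), i + window + 1)
        for j in range(lo, hi):
            if i != j:
                src.append(i)
                dst.append(j)
                rel.append(0 if speakers[i] == speakers[j] else 1)
    return torch.tensor([src, dst]), torch.tensor(rel)

x = torch.randn(6, 100)   # 6 utterance vectors (e.g., from a context encoder)
edge_index, edge_type = build_edges(["A", "B", "A", "B", "A", "B"])
conv = RGCNConv(100, 64, num_relations=2)
h = conv(x, edge_index, edge_type)   # speaker-aware utterance embeddings
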
MELD: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations
Soujanya Poria, Devamanyu Hazarika, Navonil Majumder, Gautam Naik, Erik Cambria, and Rada Mihalcea
ACL, 2019 pdf code
@inproceedings{poria-etal-2019-meld, title = "{MELD}: A Multimodal Multi-Party Dataset for Emotion Recognition in Conversations", author = "Poria, Soujanya and Hazarika, Devamanyu and Majumder, Navonil and Naik, Gautam and Cambria, Erik and Mihalcea, Rada", booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", month = jul, year = "2019", address = "Florence, Italy", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P19-1050", pages = "527--536", abstract = "Emotion recognition in conversations is a challenging task that has recently gained popularity due to its potential applications. Until now, however, a large-scale multimodal multi-party emotional conversational database containing more than two speakers per dialogue was missing. Thus, we propose the Multimodal EmotionLines Dataset (MELD), an extension and enhancement of EmotionLines. MELD contains about 13,000 utterances from 1,433 dialogues from the TV-series Friends. Each utterance is annotated with emotion and sentiment labels, and encompasses audio, visual and textual modalities. We propose several strong multimodal baselines and show the importance of contextual and multimodal information for emotion recognition in conversations. The full dataset is available for use at http://affective-meld.github.io.", }
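MELD ships its emotion and sentiment annotations as CSV files alongside the video clips. A minimal pandas sketch of inspecting the utterance-level labels; the file name and column names follow the public release, but treat them as assumptions and check them against the dataset's README:

import pandas as pd

# Hypothetical local path; the official release names the split CSVs like this.
df = pd.read_csv("train_sent_emo.csv")

# One row per utterance, annotated with emotion and sentiment.
print(df[["Speaker", "Utterance", "Emotion", "Sentiment"]].head())
print(df["Emotion"].value_counts())    # seven-way emotion distribution

# Regroup utterances into dialogues via the dialogue identifier.
dialogues = df.groupby("Dialogue_ID")["Utterance"].apply(list)
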
DialogueRNN: An Attentive RNN for Emotion Detection in Conversations
Navonil Majumder, Soujanya Poria, Devamanyu Hazarika, Rada Mihalcea, Alexander Gelbukh, and Erik Cambria
AAAI, 2019 pdf code
@InProceedings{Majumder_Poria_Hazarika_Mihalcea_Gelbukh_Cambria_2019, title={DialogueRNN: An Attentive RNN for Emotion Detection in Conversations}, volume={33}, url={https://aaai.org/ojs/index.php/AAAI/article/view/4657}, DOI={10.1609/aaai.v33i01.33016818}, abstractNote={Emotion detection in conversations is a necessary step for a number of applications, including opinion mining over chat history, social media threads, debates, argumentation mining, understanding consumer feedback in live conversations, and so on. Currently systems do not treat the parties in the conversation individually by adapting to the speaker of each utterance. In this paper, we describe a new method based on recurrent neural networks that keeps track of the individual party states throughout the conversation and uses this information for emotion classification. Our model outperforms the state-of-the-art by a significant margin on two different datasets.}, number={01}, booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Majumder, Navonil and Poria, Soujanya and Hazarika, Devamanyu and Mihalcea, Rada and Gelbukh, Alexander and Cambria, Erik}, year={2019}, month={Jul.}, pages={6818-6825} }
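The core mechanism is a per-party state that is updated whenever that party speaks. A stripped-down sketch of just that idea with GRU cells (the full model also maintains global and emotion states with attention, which this omits):

import torch
import torch.nn as nn

class PartyStateTracker(nn.Module):
    # One GRU state per speaker; only the current speaker's state is updated.
    def __init__(self, utt_dim=100, state_dim=100):
        super().__init__()
        self.cell = nn.GRUCell(utt_dim, state_dim)
        self.state_dim = state_dim

    def forward(self, utterances, speakers):
        # utterances: (seq_len, utt_dim); speakers: one id per utterance
        states = {s: torch.zeros(1, self.state_dim) for s in set(speakers)}
        outputs = []
        for u, s in zip(utterances, speakers):
            states[s] = self.cell(u.unsqueeze(0), states[s])
            outputs.append(states[s])      # classify emotion from this state
        return torch.cat(outputs, dim=0)

tracker = PartyStateTracker()
out = tracker(torch.randn(5, 100), ["A", "B", "A", "A", "B"])
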
A Deep Learning Approach for Multimodal Deception Detection
Gangeshwar Krishnamurthy, Navonil Majumder, Soujanya Poria, and Erik Cambria
CICLing, 2018 pdf
@InProceedings{DBLP:journals/corr/abs-1803-00344, author = {Gangeshwar Krishnamurthy and Navonil Majumder and Soujanya Poria and Erik Cambria}, title = {A Deep Learning Approach for Multimodal Deception Detection}, booktitle = {Computational Linguistics and Intelligent Text Processing}, month = {March-April}, year = {2018}, url = {http://arxiv.org/abs/1803.00344}, archivePrefix = {arXiv}, eprint = {1803.00344}, timestamp = {Mon, 13 Aug 2018 16:47:43 +0200}, biburl = {https://dblp.org/rec/bib/journals/corr/abs-1803-00344}, bibsource = {dblp computer science bibliography, https://dblp.org}, }
Memory Fusion Network for Multi-view Sequential Learning
Amir Zadeh, Paul Pu Liang, Navonil Majumder, Soujanya Poria, Erik Cambria, and Louis-Philippe Morency
AAAI, 2018 pdf code
@inproceedings{AAAI1817341, author = {Amir Zadeh and Paul Pu Liang and Navonil Majumder and Soujanya Poria and Erik Cambria and Louis-Philippe Morency}, title = {Memory Fusion Network for Multi-view Sequential Learning}, booktitle = {AAAI Conference on Artificial Intelligence}, year = {2018}, keywords = {Sentiment Analysis; Emotion Recognition; Personality Traits Recognition; Multimodal Fusion}, abstract = {Multi-view sequential learning is a fundamental problem in machine learning dealing with multi-view sequences. In a multi-view sequence, there exists two forms of interactions between different views: view-specific interactions and cross-view interactions. In this paper, we present a new neural architecture for multi-view sequential learning called the Memory Fusion Network (MFN) that explicitly accounts for both interactions in a neural architecture and continuously models them through time. The first component of the MFN is called the System of LSTMs, where view-specific interactions are learned in isolation through assigning an LSTM function to each view. The cross-view interactions are then identified using a special attention mechanism called the Delta-memory Attention Network (DMAN) and summarized through time with a Multi-view Gated Memory. Through extensive experimentation, MFN is compared to various proposed approaches for multi-view sequential learning on multiple publicly available benchmark datasets. MFN outperforms all the multi-view approaches. Furthermore, MFN outperforms all current state-of-the-art models, setting new state-of-the-art results for all three multi-view datasets.}, url = {https://aaai.org/ocs/index.php/AAAI/AAAI18/paper/view/17341} }
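A compact sketch of the MFN data flow under simplifying assumptions: one LSTM per view, a softmax attention over the concatenated previous and current memories standing in for the Delta-memory Attention Network, and a gated multi-view memory updated each step:

import torch
import torch.nn as nn

class MiniMFN(nn.Module):
    # Simplified MFN: per-view LSTMs + attention + gated multi-view memory.
    def __init__(self, view_dims=(300, 74, 35), hid=32, mem=64):
        super().__init__()
        self.hid, self.mem = hid, mem
        self.cells = nn.ModuleList([nn.LSTMCell(d, hid) for d in view_dims])
        total = 2 * hid * len(view_dims)    # previous + current cell memories
        self.attn = nn.Linear(total, total)     # stand-in for the DMAN
        self.update = nn.Linear(total, mem)     # proposed memory content
        self.gamma = nn.Linear(total, mem)      # retain/overwrite gate

    def forward(self, views):               # each view: (seq_len, batch, dim)
        seq_len, batch = views[0].shape[:2]
        h = [torch.zeros(batch, self.hid) for _ in self.cells]
        c = [torch.zeros(batch, self.hid) for _ in self.cells]
        u = torch.zeros(batch, self.mem)    # multi-view gated memory
        for t in range(seq_len):
            c_prev = torch.cat(c, dim=1)
            for i, cell in enumerate(self.cells):
                h[i], c[i] = cell(views[i][t], (h[i], c[i]))
            both = torch.cat([c_prev] + c, dim=1)
            attended = torch.softmax(self.attn(both), dim=1) * both
            g = torch.sigmoid(self.gamma(attended))
            u = g * u + (1 - g) * torch.tanh(self.update(attended))
        return u, torch.cat(h, dim=1)

views = [torch.randn(10, 4, d) for d in (300, 74, 35)]
memory, final_h = MiniMFN()(views)
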
IARM: Inter-Aspect Relation Modeling with Memory Networks in Aspect-Based Sentiment Analysis
Navonil Majumder, Soujanya Poria, Alexander Gelbukh, Md Shad Akhtar, Erik Cambria, and Asif Ekbal
EMNLP, 2018 pdf code
@inproceedings{majumder-etal-2018-iarm, title = "{IARM}: Inter-Aspect Relation Modeling with Memory Networks in Aspect-Based Sentiment Analysis", author = "Majumder, Navonil and Poria, Soujanya and Gelbukh, Alexander and Akhtar, Md. Shad and Cambria, Erik and Ekbal, Asif", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", month = oct # "-" # nov, year = "2018", address = "Brussels, Belgium", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/D18-1377", doi = "10.18653/v1/D18-1377", pages = "3402--3411", abstract = "Sentiment analysis has immense implications in e-commerce through user feedback mining. Aspect-based sentiment analysis takes this one step further by enabling businesses to extract aspect specific sentimental information. In this paper, we present a novel approach of incorporating the neighboring aspects related information into the sentiment classification of the target aspect using memory networks. We show that our method outperforms the state of the art by 1.6{\%} on average in two distinct domains: restaurant and laptop.", }
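The gist is to let the target aspect's representation attend over memories of the neighboring aspects in the same review. A bare-bones single-hop, dot-product-attention sketch (the paper refines memories with GRUs over multiple hops):

import torch

def aspect_attention(target, neighbors):
    # target: (d,) target-aspect memory; neighbors: (n, d) other-aspect memories
    weights = torch.softmax(neighbors @ target, dim=0)  # relevance of each neighbor
    context = weights @ neighbors                       # weighted neighbor summary
    return torch.cat([target, context])                 # input to the sentiment classifier

target = torch.randn(64)          # e.g., the "food" aspect
neighbors = torch.randn(3, 64)    # e.g., "service", "price", "ambience"
features = aspect_attention(target, neighbors)
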
Multi-level Multiple Attentions for Contextual Multimodal Sentiment Analysis
Soujanya Poria, Erik Cambria, Devamanyu Hazarika, Navonil Majumder, Amir Zadeh, and Louis-Philippe Morency
ICDM, 2017 pdf code
@INPROCEEDINGS{8215597, author={S. Poria and E. Cambria and D. Hazarika and N. Majumder and A. Zadeh and L. Morency}, booktitle={2017 IEEE International Conference on Data Mining (ICDM)}, title={Multi-level Multiple Attentions for Contextual Multimodal Sentiment Analysis}, year={2017}, pages={1033-1038}, keywords={data mining;feature extraction;image classification;image fusion;learning (artificial intelligence);sentiment analysis;utterances;attention-based networks;context learning;videos;multimodal sentiment analysis;contextual multimodal sentiment;multilevel multiple attentions;dynamic feature fusion;contextual information;recurrent model;Feature extraction;Videos;Context modeling;Sentiment analysis;Visualization;Social network services;Fuses}, doi={10.1109/ICDM.2017.134}, ISSN={2374-8486}, month={Nov}}
Context-Dependent Sentiment Analysis in User-Generated Videos
Soujanya Poria, Erik Cambria, Devamanyu Hazarika, Navonil Majumder, Amir Zadeh, and Louis-Philippe Morency
ACL, 2017 pdf code
@InProceedings{poria-EtAl:2017:Long, author = {Poria, Soujanya and Cambria, Erik and Hazarika, Devamanyu and Majumder, Navonil and Zadeh, Amir and Morency, Louis-Philippe}, title = {Context-Dependent Sentiment Analysis in User-Generated Videos}, booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, month = {July}, year = {2017}, address = {Vancouver, Canada}, publisher = {Association for Computational Linguistics}, pages = {873--883}, abstract = {Multimodal sentiment analysis is a developing area of research, which involves the identification of sentiments in videos. Current research considers utterances as independent entities, i.e., ignores the interdependencies and relations among the utterances of a video. In this paper, we propose a LSTM-based model that enables utterances to capture contextual information from their surroundings in the same video, thus aiding the classification process. Our method shows 5-10% performance improvement over the state of the art and high robustness to generalizability.}, url = {http://aclweb.org/anthology/P17-1081} }
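The key move is simply to run a recurrent network over the utterances of a video so each utterance representation absorbs its surroundings. A minimal sketch: a bidirectional LSTM over precomputed utterance features with a per-utterance classifier (sizes illustrative):

import torch
import torch.nn as nn

class ContextualUtteranceEncoder(nn.Module):
    # Contextualize utterance features within one video, then classify each.
    def __init__(self, utt_dim=100, hid=64, n_classes=2):
        super().__init__()
        self.lstm = nn.LSTM(utt_dim, hid, bidirectional=True, batch_first=True)
        self.clf = nn.Linear(2 * hid, n_classes)

    def forward(self, utts):         # utts: (batch, n_utterances, utt_dim)
        ctx, _ = self.lstm(utts)     # each position now sees its neighbors
        return self.clf(ctx)         # (batch, n_utterances, n_classes)

model = ContextualUtteranceEncoder()
logits = model(torch.randn(1, 8, 100))   # 8 utterances from one video
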

Archive

Variational Fusion for Multimodal Sentiment Analysis
Navonil Majumder, Soujanya Poria, Gangeshwar Krishnamurthy, Niyati Chhaya, Rada Mihalcea, and Alexander Gelbukh
arXiv, 2019 pdf
@ARTICLE{vfusion, author={N. {Majumder} and S. {Poria} and G. {Krishnamurthy} and N. {Chhaya} and R. {Mihalcea} and A. {Gelbukh}}, journal={CoRR}, title={{Variational Fusion for Multimodal Sentiment Analysis}}, year={2019}, volume={abs/1908.06008}, url={https://arxiv.org/abs/1908.06008} }