publications
2025
- arXiv — Watching the AI Watchdogs: A Fairness and Robustness Analysis of AI Safety Moderation Classifiers — Akshit Achara and Anshuman Chhabra — arXiv preprint arXiv:2501.13302, 2025
AI Safety Moderation (ASM) classifiers are designed to moderate content on social media platforms and to serve as guardrails that prevent Large Language Models (LLMs) from being fine-tuned on unsafe inputs. Owing to their potential for disparate impact, it is crucial to ensure that these classifiers: (1) do not unfairly classify content belonging to users from minority groups as unsafe compared to those from majority groups and (2) that their behavior remains robust and consistent across similar inputs. In this work, we thus examine the fairness and robustness of four widely-used, closed-source ASM classifiers: OpenAI Moderation API, Perspective API, Google Cloud Natural Language (GCNL) API, and Clarifai API. We assess fairness using metrics such as demographic parity and conditional statistical parity, comparing their performance against ASM models and a fair-only baseline. Additionally, we analyze robustness by testing the classifiers’ sensitivity to small and natural input perturbations. Our findings reveal potential fairness and robustness gaps, highlighting the need to mitigate these issues in future versions of these models.
@comment{arXiv preprint: bare ID moved into eprint/archiveprefix fields (biblatex-aware
styles render these); journal kept for backward compatibility with classic .bst styles.
"AI" is brace-protected so sentence-casing styles cannot downcase the acronym.}
@article{achara2025watching,
  title         = {Watching the {AI} Watchdogs: A Fairness and Robustness Analysis of {AI} Safety Moderation Classifiers},
  author        = {Achara, Akshit and Chhabra, Anshuman},
  journal       = {arXiv preprint arXiv:2501.13302},
  eprint        = {2501.13302},
  archiveprefix = {arXiv},
  year          = {2025},
}
2024
- arXiv — Efficient Biomedical Entity Linking: Clinical Text Standardization with Low-Resource Techniques — Akshit Achara, Sanand Sasidharan, and Gagan N — In Proceedings of the 23rd Workshop on Biomedical Natural Language Processing, Aug 2024
Clinical text is rich in information, with mentions of treatment, medication and anatomy among many other clinical terms. Multiple terms can refer to the same core concepts which can be referred as a clinical entity. Ontologies like the Unified Medical Language System (UMLS) are developed and maintained to store millions of clinical entities including the definitions, relations and other corresponding information. These ontologies are used for standardization of clinical text by normalizing varying surface forms of a clinical term through Biomedical entity linking. With the introduction of transformer-based language models, there has been significant progress in Biomedical entity linking. In this work, we focus on learning through synonym pairs associated with the entities. As compared to the existing approaches, our approach significantly reduces the training data and resource consumption. Moreover, we propose a suite of context-based and context-less reranking techniques for performing the entity disambiguation. Overall, we achieve similar performance to the state-of-the-art zero-shot and distant supervised entity linking techniques on the Medmentions dataset, the largest annotated dataset on UMLS, without any domain-based training. Finally, we show that retrieval performance alone might not be sufficient as an evaluation metric and introduce an article level quantitative and qualitative analysis to reveal further insights on the performance of entity linking methods.
@comment{ACL Anthology record for the BioNLP 2024 workshop paper; field values unchanged,
entry laid out one field per line with aligned separators.}
@inproceedings{achara-etal-2024-efficient,
  title     = {Efficient Biomedical Entity Linking: Clinical Text Standardization with Low-Resource Techniques},
  author    = {Achara, Akshit and Sasidharan, Sanand and N, Gagan},
  editor    = {Demner-Fushman, Dina and Ananiadou, Sophia and Miwa, Makoto and Roberts, Kirk and Tsujii, Junichi},
  booktitle = {Proceedings of the 23rd Workshop on Biomedical Natural Language Processing},
  month     = aug,
  year      = {2024},
  address   = {Bangkok, Thailand},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.bionlp-1.40},
  pages     = {493--505},
}
- Revealing the underlying patterns: Investigating dataset similarity, performance, and generalization — Akshit Achara and Ram Krishna Pandey — Neurocomputing, Aug 2024
Supervised deep learning models require significant amount of labeled data to achieve an acceptable performance on a specific task. However, when tested on unseen data, the models may not perform well. Therefore, the models need to be trained with additional and varying labeled data to improve the generalization. In this work, our goal is to understand the models, their performance and generalization. We establish image-image, dataset-dataset, and image-dataset distances to gain insights into the model’s behavior. Our proposed distance metric when combined with model performance can help in selecting an appropriate model/architecture from a pool of candidate architectures. We have shown that the generalization of these models can be improved by only adding a small number of unseen images (say 1, 3 or 7) into the training set. Our proposed approach reduces training and annotation costs while providing an estimate of model performance on unseen data in dynamic environments.
@comment{Elsevier export cleaned: doi stores the bare identifier (styles/resolvers add
the https://doi.org/ prefix themselves); author field moved into conventional position.
Citation key preserved so existing \cite commands keep working.}
@article{ACHARA2024127205,
  title    = {Revealing the underlying patterns: Investigating dataset similarity, performance, and generalization},
  author   = {Achara, Akshit and Pandey, Ram Krishna},
  journal  = {Neurocomputing},
  volume   = {573},
  pages    = {127205},
  year     = {2024},
  issn     = {0925-2312},
  doi      = {10.1016/j.neucom.2023.127205},
  url      = {https://www.sciencedirect.com/science/article/pii/S0925231223013280},
  keywords = {Segmentation, Generalization, Explainability, Similarity, Computer vision},
}
- TrueDeep: A systematic approach of crack detection with less data — Ramkrishna Pandey and Akshit Achara — Expert Systems with Applications, Aug 2024
Supervised and semi-supervised semantic segmentation algorithms require significant amount of annotated data to achieve a good performance. In many situations, the data is either not available or the annotation is expensive. The objective of this work is to show that by incorporating domain knowledge along with deep learning architectures, we can achieve similar performance with less data. We have used publicly available crack segmentation datasets and shown that selecting the input images using knowledge can significantly boost the performance of deep-learning based architectures. Our proposed approaches have many fold advantages such as low annotation and training cost, and less energy consumption. We have measured the performance of our algorithm quantitatively in terms of mean intersection over union (mIoU) and F-score. Our representative model, developed with 23% of the overall data, surpasses the baseline model (>1.5% F-score gain) on the test data while matching the benchmark performance and, with a suitable augmentation strategy, outperforms the baseline model (11.5% average F-score gain) and the benchmark (>8.75% average F-score gain) over the selected blind datasets.
@comment{Elsevier export cleaned: bare DOI (no resolver prefix); the camelCase product
name "TrueDeep" is brace-protected so sentence-casing styles keep its capitalization.
Citation key preserved so existing \cite commands keep working.}
@article{PANDEY2024122785,
  title    = {{TrueDeep}: A systematic approach of crack detection with less data},
  author   = {Pandey, Ramkrishna and Achara, Akshit},
  journal  = {Expert Systems with Applications},
  volume   = {242},
  pages    = {122785},
  year     = {2024},
  issn     = {0957-4174},
  doi      = {10.1016/j.eswa.2023.122785},
  url      = {https://www.sciencedirect.com/science/article/pii/S0957417423032876},
  keywords = {Cracks, Semantic segmentation, Deep learning, Computer vision, Image processing},
}
2022
- CoreDeep: Improving Crack Detection Algorithms Using Width Stochasticity — Ram Krishna Pandey and Akshit Achara — arXiv preprint arXiv:2209.04648, Aug 2022
Automatically detecting or segmenting cracks in images can help in reducing the cost of maintenance or operations. Detecting, measuring and quantifying cracks for distress analysis in challenging background scenarios is a difficult task as there is no clear boundary that separates cracks from the background. Some of the other perceptually noted challenges with the images are variations in color, intensity, depth, blur, motion-blur, orientation, region of interest (ROI) for the defect, scale, illumination, backgrounds, etc. These variations occur across (crack inter-class) and within images (crack intra-class variabilities). In this work, we have proposed a stochastic width (SW) approach to reduce the effect of these variations. Our proposed approach improves detectability and significantly reduces false positives and negatives. We have measured the performance of our algorithm objectively in terms of mean intersection over union (mIoU) and subjectively in terms of perceptual quality.
@comment{arXiv preprint: bare ID exposed via eprint/archiveprefix (biblatex-aware styles
render these); journal kept for backward compatibility with classic .bst styles.
"CoreDeep" is brace-protected against sentence-casing.}
@article{pandey2022coredeep,
  title         = {{CoreDeep}: Improving Crack Detection Algorithms Using Width Stochasticity},
  author        = {Pandey, Ram Krishna and Achara, Akshit},
  journal       = {arXiv preprint arXiv:2209.04648},
  eprint        = {2209.04648},
  archiveprefix = {arXiv},
  year          = {2022},
}