As video-sharing social-media platforms have increased in popularity, a 'creator economy' has emerged in which platform users make online content to share with wide audiences, often for profit. As the creator economy has risen in popularity, so have concerns of racism and discrimination on social media. Black content creators across multiple platforms have identified challenges with racism and discrimination, perpetuated by platform users, companies that collaborate with creators for sponsored content, and the algorithms governing these platforms. In this work, we provide a qualitative study of the experiences of Black content creators on one video-sharing platform, TikTok. We conduct 12 semi-structured interviews with Black TikTok content creators to understand their experiences, identify the challenges they face, and understand their perceptions of the platform. We find that some common challenges include: content moderation, monetization, harassment and bullying from viewers, lack of transparency of recommendation and filtering algorithms, and the perception that content from Black creators is treated unfairly by those algorithms. We then suggest design interventions to mitigate the challenges, bolster positive aspects, and overall cultivate an inclusive algorithmic experience for Black creators on TikTok.
@article{10.1145/3610169,
  author     = {Harris, Camille and Johnson, Amber Gayle and Palmer, Sadie and Yang, Diyi and Bruckman, Amy},
  title      = {``Honestly, {I} Think {TikTok} Has a Vendetta Against {Black} Creators'': Understanding {Black} Content Creator Experiences on {TikTok}},
  journal    = {Proc. ACM Hum.-Comput. Interact.},
  volume     = {7},
  articleno  = {320},
  numpages   = {31},
  year       = {2023},
  month      = oct,
  issue_date = {October 2023},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  doi        = {10.1145/3610169},
  keywords   = {algorithmic inclusivity, racism, algorithmic exclusion, black online culture, TikTok, anti-blackness, social media},
}
2022
FAccT
Exploring the Role of Grammar and Word Choice in Bias toward African American English (AAE) in Hate Speech Classification
Camille Harris, Matan Halevy, Ayanna Howard, and 2 more authors
In 2022 ACM Conference on Fairness, Accountability, and Transparency, Oct 2022
@inproceedings{harris2022exploring,
  author    = {Harris, Camille and Halevy, Matan and Howard, Ayanna and Bruckman, Amy and Yang, Diyi},
  title     = {Exploring the Role of Grammar and Word Choice in Bias toward {African American English} ({AAE}) in Hate Speech Classification},
  booktitle = {2022 ACM Conference on Fairness, Accountability, and Transparency},
  pages     = {789--798},
  year      = {2022},
  doi       = {10.1145/3531146.3533144},
}
@inproceedings{ziems2022value,
  author    = {Ziems, Caleb and Chen, Jiaao and Harris, Camille and Anderson, Jessica and Yang, Diyi},
  title     = {{VALUE}: Understanding Dialect Disparity in {NLU}},
  booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics ({ACL})},
  year      = {2022},
}
Configuration of user interface for intuitive selection of insight visualizations
Camille Harris, Zening Qu, Sana Lee, and 8 more authors
@misc{harris2022configuration,
  author = {Harris, Camille and Qu, Zening and Lee, Sana and Rossi, Ryan and Du, Fan and Koh, Eunyee and Lee, Tak Yeon and Kim, Sungchul and Zhao, Handong and Shekhar, Sumit and others},
  title  = {Configuration of user interface for intuitive selection of insight visualizations},
  year   = {2022},
  month  = aug,
  note   = {US Patent App. 17/161,770},
}
2021
Insight-centric Visualization Recommendation
Camille Harris, Ryan A Rossi, Sana Malik, and 5 more authors
@misc{harris2021insight,
  author        = {Harris, Camille and Rossi, Ryan A. and Malik, Sana and Hoffswell, Jane and Du, Fan and Lee, Tak Yeon and Koh, Eunyee and Zhao, Handong},
  title         = {Insight-centric Visualization Recommendation},
  year          = {2021},
  eprint        = {2103.11297},
  archiveprefix = {arXiv},
  note          = {arXiv preprint arXiv:2103.11297},
}
EAAMO
Mitigating Racial Biases in Toxic Language Detection with an Equity-Based Ensemble Framework
Matan Halevy, Camille Harris, Amy Bruckman, and 2 more authors
@inproceedings{halevy2021mitigating,
  author    = {Halevy, Matan and Harris, Camille and Bruckman, Amy and Yang, Diyi and Howard, Ayanna},
  title     = {Mitigating Racial Biases in Toxic Language Detection with an Equity-Based Ensemble Framework},
  booktitle = {Equity and Access in Algorithms, Mechanisms, and Optimization},
  pages     = {1--11},
  year      = {2021},
  doi       = {10.1145/3465416.3483299},
}