diff --git a/_publications/2022-05-30-axonn-pipelining-ipdps.md b/_publications/2022-05-30-axonn-pipelining-ipdps.md
new file mode 100644
index 0000000..906a3cd
--- /dev/null
+++ b/_publications/2022-05-30-axonn-pipelining-ipdps.md
@@ -0,0 +1,26 @@
+---
+title: "AxoNN: An asynchronous, message-driven parallel framework for extreme-scale deep learning"
+collection: publications
+category: conferences
+permalink: null
+excerpt: ""
+author_list: 'S. Singh, A. Bhatele'
+date: 2022-05-30
+venue: "2022 IEEE International Parallel and Distributed Processing Symposium (IPDPS)"
+paperurl: 'https://ieeexplore.ieee.org/abstract/document/9820664'
+pdf: /publications/paper_pdfs/axonn_pp.pdf
+citation: |
+ @INPROCEEDINGS{9820664,
+ author={Singh, Siddharth and Bhatele, Abhinav},
+ booktitle={2022 IEEE International Parallel and Distributed Processing Symposium (IPDPS)},
+ title={AxoNN: An asynchronous, message-driven parallel framework for extreme-scale deep learning},
+ year={2022},
+ volume={},
+ number={},
+ pages={606-616},
+ keywords={Training;Deep learning;Schedules;Neural networks;Memory management;Graphics processing units;Clustering algorithms;parallel deep learning;asynchrony;message driven scheduling;memory optimizations},
+ doi={10.1109/IPDPS53621.2022.00065}}
+
+---
+
+
diff --git a/_publications/2023-05-15-axonn-pruning-ipdps.md b/_publications/2023-05-15-axonn-pruning-ipdps.md
new file mode 100644
index 0000000..1e12ac9
--- /dev/null
+++ b/_publications/2023-05-15-axonn-pruning-ipdps.md
@@ -0,0 +1,25 @@
+---
+title: "Exploiting sparsity in pruned neural networks to optimize large model training"
+collection: publications
+category: conferences
+permalink: null
+excerpt: ""
+author_list: 'S. Singh, A. Bhatele'
+date: 2023-05-15
+venue: "2023 IEEE International Parallel and Distributed Processing Symposium (IPDPS)"
+paperurl: 'https://ieeexplore.ieee.org/abstract/document/10177389'
+pdf: /publications/paper_pdfs/axonn_pruning.pdf
+citation: |
+ @INPROCEEDINGS{10177389,
+ author={Singh, Siddharth and Bhatele, Abhinav},
+ booktitle={2023 IEEE International Parallel and Distributed Processing Symposium (IPDPS)},
+ title={Exploiting Sparsity in Pruned Neural Networks to Optimize Large Model Training},
+ year={2023},
+ volume={},
+ number={},
+ pages={245-255},
+ keywords={Deep learning;Training;Artificial satellites;Computational modeling;Neural networks;Memory management;Parallel processing;lottery ticket hypothesis;sparse computations;GPUs;parallel deep learning;memory optimizations},
+ doi={10.1109/IPDPS54959.2023.00033}}
+---
+
+
diff --git a/_publications/2023-06-21-deepspeed-ted.md b/_publications/2023-06-21-deepspeed-ted-ics.md
similarity index 100%
rename from _publications/2023-06-21-deepspeed-ted.md
rename to _publications/2023-06-21-deepspeed-ted-ics.md
diff --git a/_publications/paper_pdfs/axonn_pp.pdf b/_publications/paper_pdfs/axonn_pp.pdf
new file mode 100755
index 0000000..b1dc4ae
Binary files /dev/null and b/_publications/paper_pdfs/axonn_pp.pdf differ
diff --git a/_publications/paper_pdfs/axonn_pruning.pdf b/_publications/paper_pdfs/axonn_pruning.pdf
new file mode 100755
index 0000000..645c0e7
Binary files /dev/null and b/_publications/paper_pdfs/axonn_pruning.pdf differ