
Builds a skill table from a data frame of skills and returns it as a string of text that can be rendered as LaTeX.

Usage

build_skill_table(skill_data)

Arguments

skill_data

A data frame containing skill data, with one row per skill: an alias column giving the skill category and a skill column giving the skill name (see Examples).

Value

A string containing a LaTeX tabular environment.

Examples

# Define some data ----------------------------------------------------------
library(dplyr, warn.conflicts = FALSE)
skill <- c("R", "SQL", "Excel", "Pandas", "Feature Engineering")
alias <- c("Coding", "Coding", "Data Analysis", "Data Analysis", "ML")
skill_data <- dplyr::bind_cols(alias = alias, skill = skill)
print(skill_data)
#> # A tibble: 5 × 2
#>   alias         skill              
#>   <chr>         <chr>              
#> 1 Coding        R                  
#> 2 Coding        SQL                
#> 3 Data Analysis Excel              
#> 4 Data Analysis Pandas             
#> 5 ML            Feature Engineering


# Render --------------------------------------------------------------------
skill_table <- build_skill_table(skill_data)
paste(skill_table)
#> [1] "\\begin{tabular}{@{}p{0.475\\linewidth}p{0.475\\linewidth}@{}}\n\\textbf{Coding:} R, SQL&\\textbf{Data Analysis:} Excel, Pandas\\\\\\textbf{ML:} Feature Engineering&\\end{tabular}\n\n"

# With a larger dataset -----------------------------------------------------
data("example_skill_data", package = "autocv")
skill_table <- build_skill_table(example_skill_data)
paste(skill_table)
#> [1] "\\begin{tabular}{@{}p{0.475\\linewidth}p{0.475\\linewidth}@{}}\n\\textbf{Programming:} R, SQL, Python, MATLAB, Bash&\\textbf{Machine Learning:} Beautiful Soup, NumPy, SciPy, Pandas, ArviZ, dplyr (R), Web Scraping, Data Preprocessing, SymPy, Exploratory Data Analysis, TensorFlow, Scikit-Learn, Keras, Feature Engineering, Hyperparameter Tuning, Model Optimization, Model Tuning, ML Pipelines, Supervised Learning, Unsupervised Learning, Dimensionality Reduction, Deep Learning, Reinforcement Learning, Neural Networks, Customer Segmentation, Decision Trees, Clustering, Manifold Learning, Classification, Model Selection\\\\\\textbf{NLP:} SpaCy, Hugging Face, LLMs, Topic Modeling, Transformer Models, Sentiment Analysis&\\textbf{Data Visualization:} Matplotlib, ggplot (R), Seaborn (Python), Tableau, Power BI\\\\\\textbf{Data Management:} Excel, NoSQL, Snowflake, PostgreSQL, MySQL, Cassandra, MongoDB, Data Modeling, Data Pipelines, Data Validation, Database Design, Data Warehouses, ETL, Dashboards, Business Intelligence, Microsoft Office Suite&\\textbf{Python Development:} MyPy, PyTest, Sphinx, Pydantic\\\\\\textbf{DevOps:} Git, GitHub Actions, Continuous Testing, CI/CD Pipelines, Object-Oriented Programming (OOP)&\\textbf{Big Data:} Databricks, Hadoop, Spark, Azure, Google Cloud Platform (GCP), Vertex AI\\\\\\textbf{MLOps:} Kubernetes, Docker, Model Deployment&\\textbf{Document Authoring:} LaTeX, Jupyter Notebooks, Google Colab, R Markdown, VSCode\\\\\\textbf{Web Development:} CSS, HTML, Javascript&\\textbf{Statistics:} Bayesian Inference, Hypothesis Testing, Statistical Learning, Probability Theory, Statistical Modeling, Convex Optimization, Time Series Analysis, Regression Analysis, Graph Theory, Causal Inference, Matrix Calculus, AB Testing, Applied Mathematics, Statistical Analysis, Inferential Statistics, Machine Learning, Mixture Models, Hierarchical Models, Multivariate Analysis\\\\\\textbf{Soft Skills:} Electroencephalography (EEG), Functional Magnetic Resonance Imaging (fMRI), Scientific Writing, Experimental Design, Critical Thinking, Documentation Writing&\\end{tabular}\n\n"