if (!require("ggplot2")) install.packages("ggplot2") library(ggplot2) if (!require("reshape2")) install.packages("reshape2") library(reshape2) data <- read.csv("Word_Dynamics2.csv") keywords <- data$keyword data_without_keywords <- t(data[,-1]) normalize <- function(x) { return((x - min(x)) / (max(x) - min(x))) } normalized_data <- apply(data_without_keywords, 2, normalize) cor_matrix <- cor(normalized_data) colnames(cor_matrix) <- keywords rownames(cor_matrix) <- keywords hc <- hclust(dist(cor_matrix)) dendrogram <- as.dendrogram(hc) ordered_cor_matrix <- cor_matrix[order.dendrogram(dendrogram), order.dendrogram(dendrogram)] melted_ordered_cor_matrix <- melt(ordered_cor_matrix) p <- ggplot(data = melted_ordered_cor_matrix, aes(x=Var1, y=Var2, fill=value)) + geom_tile() + scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0) + theme_minimal() + theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 8), axis.text.y = element_text(angle = 0, hjust = 1, size = 8)) + labs(fill = "Correlation", x = "Keyword", y = "Keyword") p ggsave("Correlation.png", plot = p, width = 12, height = 12, dpi = 300) install.packages("reshape2") if (!requireNamespace("BiocManager", quietly = TRUE)) install.packages("BiocManager") BiocManager::install("ComplexHeatmap") install.packages("circlize") library(reshape2) library(ComplexHeatmap) library(circlize) data <- read.csv("Word_Dynamics2.csv") data_long <- melt(data, id.vars = "keyword") data_long$value <- as.numeric(data_long$value) data_long <- ddply(data_long, .(keyword), transform, scale = scale(value)) data_wide <- dcast(data_long, keyword ~ variable, value.var = "scale") colnames(data_wide)[-1] <- gsub("^X", "", colnames(data_wide)[-1]) rownames(data_wide) <- data_wide$keyword data_matrix <- as.matrix(data_wide[,-1]) my_col_fun <- colorRamp2(c(-2, 0, 2), c("blue", "white", "red")) pdf("heatmap_output.pdf", width = 10, height = 12) ht <- Heatmap(data_matrix, name = "scale", col = my_col_fun, cluster_rows = TRUE, show_row_names = TRUE, row_names_side = "right", cluster_columns = FALSE, row_names_gp = gpar(fontsize = 10), row_dend_width = unit(2, "cm"), clustering_distance_rows = "euclidean", clustering_method_rows = "complete", row_names_max_width = unit(8, "cm") ) draw(ht, heatmap_legend_side = "bot", annotation_legend_side = "bot") dev.off() install.packages("ggplot2") install.packages("reshape2") install.packages("dendextend") install.packages("gplots") library(ggplot2) library(reshape2) library(plyr) library(dendextend) library(gplots) library(stringr) library(reshape2) data <- read.csv("Word_Dynamics2.csv") data_long <- melt(data, id.vars = "keyword") data_long$value <- as.numeric(data_long$value) data_long <- ddply(data_long, .(keyword), transform, scale = scale(value)) data_long$variable <- str_replace(data_long$variable, "^X", "") p <- ggplot(data_long, aes(x = variable, y = keyword, fill = scale)) + geom_tile() + scale_fill_gradient2(low = "blue", mid = "white", high = "red", midpoint = 0) + theme_minimal() + xlab("Year") + ylab("Keyword") + ggtitle("Row Scaled Heatmap of Keyword Dynamics") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust = 1), plot.background = element_rect(fill = "transparent")) p ggsave("heatmap.png", plot = p, width = 9, height = 12, dpi = 300) install.packages("ggplot2") library(ggplot2) data <- read.csv("Word_Dynamics2.csv") keywords <- data$keyword data_without_keywords <- data[,-1] normalize <- function(x) { return((x - min(x)) / (max(x) - min(x))) } normalized_data <- 
## ---- 4. Keyword correlation heatmap without clustering (ggplot2) ----
install.packages("ggplot2")
library(ggplot2)
library(reshape2)  # melt() is used again below

data <- read.csv("Word_Dynamics2.csv")
keywords <- data$keyword
data_without_keywords <- data[, -1]

# Min-max normalisation of each year column.
normalize <- function(x) {
  return((x - min(x)) / (max(x) - min(x)))
}
normalized_data <- as.data.frame(lapply(data_without_keywords, normalize))

# Transpose so keywords are columns, then correlate keyword time series.
transposed_normalized_data <- t(normalized_data)
cor_matrix <- cor(transposed_normalized_data, method = "pearson")
rownames(cor_matrix) <- colnames(cor_matrix) <- keywords
melted_cor_matrix <- melt(cor_matrix)

ggplot(data = melted_cor_matrix, aes(x = Var1, y = Var2, fill = value)) +
  geom_tile() +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0) +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        axis.text.y = element_text(angle = 45, hjust = 1)) +
  labs(fill = "Correlation", x = "Keyword", y = "Keyword")

## ---- 5. Logistic and quadratic growth models of annual document counts ----
install.packages("ggplot2")
install.packages("drc")
install.packages("minpack.lm")
library(ggplot2)
library(dplyr)
library(broom)
library(minpack.lm)

getwd()
data <- read.csv("Logistic.csv")

# Self-starting logistic fit and quadratic fit to the yearly document counts.
logistic_fit <- nlsLM(Documents ~ SSlogis(Year, Asym, xmid, scal), data = data)
poly_fit2 <- lm(Documents ~ poly(Year, 2), data = data)

# Predict over the observed years plus a 10-year extrapolation window.
year_seq <- seq(min(data$Year), max(data$Year) + 10)
predictions <- expand.grid(Year = year_seq)
predictions <- predictions %>%
  mutate(
    Logistic = predict(logistic_fit, newdata = predictions),
    Poly2 = predict(poly_fit2, newdata = predictions, interval = "prediction")[, 1]
  )

# Prediction interval of the quadratic model (drawn as the shaded ribbon below).
pred_intervals <- predict(poly_fit2, newdata = predictions, interval = "prediction")
predictions$lwr <- pred_intervals[, "lwr"]
predictions$upr <- pred_intervals[, "upr"]

p <- ggplot() +
  geom_point(data = data, aes(x = Year, y = Documents)) +
  geom_line(data = predictions, aes(x = Year, y = Logistic), color = "red") +
  geom_ribbon(data = predictions, aes(x = Year, ymin = lwr, ymax = upr), fill = "red", alpha = 0.2) +
  geom_line(data = predictions, aes(x = Year, y = Poly2), color = "blue") +
  theme_minimal() +
  labs(title = "Model fits and prediction intervals")
p
ggsave("logistic.png", plot = p, width = 6, height = 6, dpi = 300)
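## Optional diagnostics, a sketch not in the original script: quick numerical
## checks on the two fits above, using only stats and the already-loaded
## broom package. Assumes both fits converged on Logistic.csv.
summary(logistic_fit)          # Asym, xmid, scal estimates with standard errors
coef(logistic_fit)             # logistic parameter point estimates
AIC(logistic_fit, poly_fit2)   # crude in-sample comparison of the two models
broom::glance(poly_fit2)       # R-squared and other fit statistics for the quadratic model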