Spring Pulse Flow Chinook salmon Survival Study
2022-2023 Season (PROVISIONAL DATA)
Telemetry Study Template for this study can be found here
1. Project Status
The study is complete; all tags have been inactive since 2023-08-31. All times are reported in Pacific Standard Time (see the short time-zone example after the release table below).
The study began on 2023-04-18 09:05:00; tagging details are summarized below:
| Release | First_release_time | Last_release_time | Number_fish_released | Release_location | Release_rkm | Mean_length (mm) | Mean_weight (g) |
|---|---|---|---|---|---|---|---|
| Before Pulse | 2023-04-18 09:05:00 | 2023-04-19 10:05:00 | 251 | RBDD_Rel | 461.579 | 84.1 | 7.5 |
| First Pulse | 2023-04-24 10:10:00 | 2023-04-25 09:45:00 | 250 | RBDD_Rel | 461.579 | 84.8 | 6.9 |
| Second Pulse - Week 1 | 2023-05-02 09:07:00 | 2023-05-03 09:07:00 | 250 | RBDD_Rel | 461.579 | 86.2 | 7.5 |
| Second Pulse - Week 2 | 2023-05-09 09:05:00 | 2023-05-10 08:25:00 | 250 | RBDD_Rel | 461.579 | 86.6 | 7.2 |
| Second Pulse - Week 3 | 2023-05-16 08:26:00 | 2023-05-17 08:50:00 | 250 | RBDD_Rel | 461.579 | 88.5 | 7.5 |
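All timestamps in the table above and throughout this page are Pacific Standard Time (UTC-8, with no daylight-saving shift). In the R code that follows, this corresponds to the fixed-offset time zone "Etc/GMT+8" (the POSIX sign convention is inverted). A minimal sketch of the conversion, using a made-up UTC timestamp:
# Convert a UTC timestamp to Pacific Standard Time; "Etc/GMT+8" is UTC-8 year-round.
# The example timestamp is illustrative only.
utc_time <- as.POSIXct("2023-04-18 17:05:00", tz = "UTC")
format(utc_time, tz = "Etc/GMT+8")
# "2023-04-18 09:05:00"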
2. Real-time Fish Detections
library(leaflet)
library(maps)
library(htmlwidgets)
library(leaflet.extras)
library(dplyr)
library(dbplyr)
library(DBI)
library(odbc)
library(data.table)
# Create connection with cloud database
con <- dbConnect(odbc(),
Driver = "SQL Server",
Server = "calfishtrack-server.database.windows.net",
Database = "realtime_detections",
UID = "realtime_user",
PWD = "Pass@123",
Port = 1433)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
## THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
cat("No detections yet")
# Use dbplyr to load realtime_locs and qryHexCodes sql table
gen_locs <- tbl(con, "realtime_locs") %>% collect()
# gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F) %>% filter(is.na(stop))
leaflet(data = gen_locs[is.na(gen_locs$stop),]) %>%
# setView(-72.14600, 43.82977, zoom = 8) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addMarkers(~longitude, ~latitude, label = ~general_location, group = "Receiver Sites", popup = ~location) %>%
# addAwesomeMarkers(~lon_dd, ~lat_dd, label = ~locality, group = "Sites", icon=icons) %>%
addScaleBar(position = "bottomleft") %>%
addLayersControl(
baseGroups = c("Street Map", "Satellite", "Relief"),
overlayGroups = c("Receiver Sites"),
options = layersControlOptions(collapsed = FALSE)) %>%
addSearchFeatures(targetGroups = c("Receiver Sites"))
} else {
# Use dbplyr to load realtime_locs and qryHexCodes sql table
gen_locs <- tbl(con, "realtime_locs") %>% collect()
# gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
mutate(day = as.Date(day)) %>%
# Subset to only look at data for the correct beacon for that day
filter(TagCode == beacon) %>%
# Only keep beacon by day for days since fish were released
filter(day >= as.Date(min(study_tagcodes$release_time)) & day <= endtime) %>%
dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
arrivals_per_day <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST, na.rm = T)) %>%
arrange(TagCode, general_location) %>%
mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8")) %>%
group_by(day, general_location) %>%
summarise(New_arrivals = length(TagCode)) %>%
na.omit() %>%
mutate(day = as.Date(day)) %>%
dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]), .,
by = c("general_location", "day")) %>%
arrange(general_location, day) %>%
mutate(day = as.factor(day)) %>%
filter(general_location != "Bench_test") %>% # Remove bench test
filter(!(is.na(general_location))) # Remove NA locations
## Remove sites that were not operational the whole time
#### FOR THE SEASONAL SURVIVAL PAGE, KEEP ALL SITES SINCE PEOPLE WANT TO SEE DETECTIONS OF LATER FISH AT NEWLY
#### DEPLOYED SPOTS
gen_locs_days_in_oper <- arrivals_per_day %>%
group_by(general_location) %>%
summarise(days_in_oper = length(day))
#gen_locs_days_in_oper <- gen_locs_days_in_oper[gen_locs_days_in_oper$days_in_oper ==
# max(gen_locs_days_in_oper$days_in_oper),]
arrivals_per_day_in_oper <- arrivals_per_day %>%
filter(general_location %in% gen_locs_days_in_oper$general_location)
fish_per_site <- arrivals_per_day_in_oper %>%
group_by(general_location) %>%
summarise(fish_count = sum(New_arrivals, na.rm=T))
gen_locs_mean_coords <- gen_locs %>%
filter(is.na(stop) & general_location %in% fish_per_site$general_location) %>%
group_by(general_location) %>%
summarise(latitude = mean(latitude), # estimate mean lat and lons for each genloc
longitude = mean(longitude))
fish_per_site <- merge(fish_per_site, gen_locs_mean_coords)
if(!is.na(release_stats$Release_lat[1])){
leaflet(data = fish_per_site) %>%
addProviderTiles("Esri.WorldStreetMap", group = "Street Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count,
labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
addCircleMarkers(data = release_stats, ~Release_lon, ~Release_lat, label = ~Number_fish_released, stroke = F, color = "blue", fillOpacity = 1,
group = "Release Sites", popup = ~Release_location, labelOptions = labelOptions(noHide = T, textsize = "15px")) %>%
addScaleBar(position = "bottomleft") %>%
addLegend("bottomright", labels = c("Receivers", "Release locations"), colors = c("red","blue")) %>%
addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"), options = layersControlOptions(collapsed = FALSE))
} else {
leaflet(data = fish_per_site) %>%
addProviderTiles("Esri.NatGeoWorldMap", group = "Street Map") %>%
addProviderTiles("Esri.WorldImagery", group = "Satellite") %>%
addProviderTiles("Esri.WorldShadedRelief", group = "Relief") %>%
# Marker data are from the sites data frame. We need the ~ symbols
# to indicate the columns of the data frame.
addPulseMarkers(lng = fish_per_site$longitude, lat = fish_per_site$latitude, label = ~fish_count,
labelOptions = labelOptions(noHide = T, textsize = "15px"), group = "Receiver Sites",
popup = ~general_location, icon = makePulseIcon(heartbeat = 1.3)) %>%
addScaleBar(position = "bottomleft") %>%
addLayersControl(baseGroups = c("Street Map", "Satellite", "Relief"),
options = layersControlOptions(collapsed = FALSE))
}
}
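The chunk above, as its header comment notes, requires local files and direct database access. For readers working only from publicly posted data, the hedged sketch below shows one way to pull comparable detection records from an ERDDAP server with the rerddap package. The server URL, dataset ID, and column layout are assumptions to check against the current ERDDAP catalog, not values confirmed by this report.
# Hedged sketch: query detections from ERDDAP instead of local files/SQL.
# The server URL and dataset ID ("FED_JSATS_detects") are assumptions --
# verify them in the ERDDAP catalog before use; the full table can be large.
library(rerddap)
erddap_url <- "https://oceanview.pfeg.noaa.gov/erddap/"
detects_info <- info("FED_JSATS_detects", url = erddap_url)
detects_erddap <- tabledap(detects_info, url = erddap_url)
head(detects_erddap)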
library(plotly) # plot_ly() is used in this chunk; the original report loads plotly in an earlier, unshown setup chunk
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if (nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) > 0){
detects_study <- detects_study[order(detects_study$TagCode, detects_study$DateTime_PST),]
## Now estimate the time in hours between the previous and next detection, for each detection.
detects_study$prev_genloc <- shift(detects_study$general_location, fill = NA, type = "lag")
#detects_study$prev_genloc <- shift(detects_study$General_Location, fill = NA, type = "lag")
## Now make NA the time diff values when it's between 2 different tagcodes or genlocs
detects_study[which(detects_study$TagCode != shift(detects_study$TagCode, fill = NA, type = "lag")), "prev_genloc"] <- NA
detects_study[which(detects_study$general_location != detects_study$prev_genloc), "prev_genloc"] <- NA
detects_study$mov_score <- 0
detects_study[is.na(detects_study$prev_genloc), "mov_score"] <- 1
detects_study$mov_counter <- cumsum(detects_study$mov_score)
detects_summary <- aggregate(list(first_detect = detects_study$DateTime_PST), by = list(TagCode = detects_study$TagCode, length = detects_study$length, release_time = detects_study$release_time, mov_counter = detects_study$mov_counter ,general_location = detects_study$general_location, river_km = detects_study$river_km, release_rkm = detects_study$release_rkm), min)
detects_summary <- detects_summary[is.na(detects_summary$first_detect) == F,]
releases <- aggregate(list(first_detect = detects_summary$release_time), by = list(TagCode = detects_summary$TagCode, length = detects_summary$length, release_time = detects_summary$release_time, release_rkm = detects_summary$release_rkm), min)
releases$river_km <- releases$release_rkm
releases$mov_counter <- NA
releases$general_location <- NA
detects_summary <- rbindlist(list(detects_summary, releases), use.names = T)
detects_summary <- detects_summary[order(detects_summary$TagCode, detects_summary$first_detect),]
starttime <- as.Date(min(detects_study$release_time), "Etc/GMT+8")
## Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d"))+1, max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
#par(mar=c(6, 5, 2, 5) + 0.1)
plot_ly(detects_summary, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_lines(x = ~first_detect, y = ~river_km, color = ~TagCode) %>%
add_markers(x = ~first_detect, y = ~river_km, color = ~TagCode, showlegend = F) %>%
layout(showlegend = T,
xaxis = list(title = "<b> Date <b>", mirror=T,ticks="outside",showline=T, range=c(starttime,endtime)),
yaxis = list(title = "<b> Kilometers upstream of the Golden Gate <b>", mirror=T,ticks="outside",showline=T, range=c(max(detects_study$Rel_rkm)+10, min(gen_locs[is.na(gen_locs$stop),"rkm"])-10)),
legend = list(title=list(text='<b> Tag Code </b>')),
margin=list(l = 50,r = 100,b = 50,t = 50)
)
}else{
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Kilometers upstream of the Golden Gate")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
}
_______________________________________________________________________________________________________
library(tidyr)
# Also required by this chunk (loaded in an earlier, unshown setup chunk of the original report):
library(plotly)        # plot_ly()
library(dataRetrieval) # readNWISuv()
library(reshape2)      # melt()
library(scales)        # alpha()
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_3 <- detects_study %>% filter(general_location == "Blw_Salt_RT")
if(nrow(detects_3) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_3 <- detects_3 %>%
dplyr::left_join(., detects_3 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_3$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_3$Release))])
tagcount1 <- detects_3 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11377100", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## reorder columns in alphabetical order so its coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Bend Bridge",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
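For reference on the flow download above: USGS parameter code 00060 is discharge in cubic feet per second, and dataRetrieval returns the instantaneous series in a column named X_00060_00000, which is why the daily mean is computed from that column. A small standalone sketch (dates chosen arbitrarily for illustration):
# Standalone sketch of the readNWISuv() call used above (Sacramento River above Bend Bridge).
# Parameter code "00060" = discharge (cfs); values are returned in column "X_00060_00000".
library(dataRetrieval)
flow_raw <- readNWISuv(siteNumbers = "11377100", parameterCd = "00060",
                       startDate = "2023-04-18", endDate = "2023-04-19")
head(flow_raw[, c("dateTime", "X_00060_00000")])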
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_4 <- detects_study %>% filter(general_location == "MeridianBr")
if(nrow(detects_4) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_4 <- detects_4 %>%
dplyr::left_join(., detects_4 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_4$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_4$Release))])
tagcount1 <- detects_4 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11390500", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## reorder columns in alphabetical order so its coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Wilkins Slough",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_5 <- detects_study %>% filter(general_location == "TowerBridge")
if(nrow(detects_5) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_5 <- detects_5 %>%
dplyr::left_join(., detects_5 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_5$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_5$Release))])
tagcount1 <- detects_5 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
# Download flow data
flow_day <- readNWISuv(siteNumbers = "11425500", parameterCd="00060", startDate = starttime,
endDate = endtime+1) %>%
mutate(Day = as.Date(format(dateTime, "%Y-%m-%d"))) %>%
group_by(Day) %>%
summarise(parameter_value = mean(X_00060_00000))
## reorder columns in alphabetical order so its coloring in barplots is consistent
daterange2 <- daterange1[,order(colnames(daterange1))] %>%
dplyr::left_join(., flow_day, by = "Day")
rownames(daterange2) <- daterange2$Day
daterange2$Date <- daterange2$Day
daterange2$Day <- NULL
daterange2_flow <- daterange2 %>% select(Date, parameter_value)
daterange3 <- melt(daterange2[,!(names(daterange2) %in% c("parameter_value"))],
id.vars = "Date", variable.name = ".")
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
par(mar=c(6, 5, 2, 5) + 0.1)
ay <- list(
overlaying = "y",
nticks = 5,
color = "#947FFF",
side = "right",
title = "Flow (cfs) at Verona",
automargin = TRUE
)
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations(text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
add_lines(x=~daterange2_flow$Date,
y=~daterange2_flow$parameter_value,
line = list(color = alpha("#947FFF", alpha = 0.5)), yaxis="y2", showlegend=FALSE,
inherit=FALSE) %>%
layout(yaxis2 = ay,showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
_______________________________________________________________________________________________________
library(tidyr)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_6 <- detects_study %>% filter(general_location == "Benicia_west" | general_location == "Benicia_east")
if(nrow(detects_6) == 0){
plot(1:2, type = "n", xlab = "",xaxt = "n", yaxt = "n", ylab = "Number of fish arrivals per day")
text(1.5,1.5, labels = "NO DETECTIONS YET", cex = 2)
} else {
detects_6 <- detects_6 %>%
dplyr::left_join(., detects_6 %>%
group_by(TagCode) %>%
summarise(first_detect = min(DateTime_PST))) %>%
mutate(Day = as.Date(as.Date(first_detect, "Etc/GMT+8")))
starttime <- as.Date(min(detects_6$release_time), "Etc/GMT+8")
# Endtime should be either now, or end of predicted tag life, whichever comes first
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
daterange <- data.frame(Day = seq.Date(from = starttime, to = endtime, by = "day"))
rels <- unique(study_tagcodes$Release)
rel_num <- length(rels)
rels_no_detects <- as.character(rels[!(rels %in% unique(detects_6$Release))])
tagcount1 <- detects_6 %>%
group_by(Day, Release) %>%
summarise(unique_tags = length(unique(TagCode))) %>%
spread(Release, unique_tags)
daterange1 <- merge(daterange, tagcount1, all.x=T)
daterange1[is.na(daterange1)] <- 0
if(length(rels_no_detects)>0){
for(i in rels_no_detects){
daterange1 <- cbind(daterange1, x=NA)
names(daterange1)[names(daterange1) == "x"] <- paste(i)
}
}
## reorder columns in alphabetical order so its coloring in barplots is consistent
daterange1 <- daterange1[,order(colnames(daterange1))]
daterange2 <- daterange1
rownames(daterange2) <- daterange2$Day
daterange2$Day <- NULL
par(mar=c(6, 5, 2, 5) + 0.1)
daterange2$Date <- as.Date(row.names(daterange2))
daterange3 <- melt(daterange2, id.vars = "Date", variable.name = ".", )
daterange3$. <- factor(daterange3$., levels = sort(unique(daterange3$.), decreasing = T))
plot_ly(daterange3, width = 900, height = 600, dynamicTicks = TRUE) %>%
add_bars(x = ~Date, y = ~value, color = ~.) %>%
add_annotations( text="Release (click on legend items to isolate)", xref="paper", yref="paper",
x=0.01, xanchor="left",
y=1.056, yanchor="top", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
layout(showlegend = T,
barmode = "stack",
xaxis = list(title = "Date", mirror=T,ticks="outside",showline=T),
yaxis = list(title = "Number of fish arrivals per day", mirror=T,ticks="outside",showline=T),
legend = list(orientation = "h",x = 0.34, y = 1.066),
margin=list(l = 50,r = 100,b = 50,t = 50))
}
3. Survival and Routing Probability
# CJS models below use RMark; tables are rendered with knitr/kableExtra (loaded in an earlier, unshown setup chunk of the original report)
library(RMark)
library(knitr)
library(kableExtra)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
detects_tower <- detects_study %>% filter(general_location == "TowerBridge")
if(nrow(detects_tower) == 0){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA,
"95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model). If Yolo Bypass Weirs are overtopping during migration, fish may have taken
that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F, position = "left"))
} else {
study_count <- nrow(study_tagcodes)
# Only do survival to Sac for now
surv <- detects_study %>% filter(river_km > 168 & river_km < 175)
# calculate mean and SD travel time
travel <- aggregate(list(first_detect = surv$DateTime_PST), by = list(Release = surv$Release, TagCode = surv$TagCode, RelDT = surv$RelDT), min)
travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days),n = nrow(travel)))
# Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(surv, TagCode ~ river_km, fun.aggregate = length))
# Sort columns by river km in descending order
gen_loc_sites <- ncol(inp)-1 # Count number of genlocs
if(gen_loc_sites < 2){
WR.surv <- data.frame("Release"=NA, "Survival (%)"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA,
"95% upper C.I."=NA, "Detection efficiency (%)"=NA)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.",
"Detection efficiency (%)")
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model). If Yolo Bypass Weirs are overtopping during migration, fish may
have taken that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F,position = "left"))
} else {
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)] %>%
dplyr::left_join(study_tagcodes, ., by = "TagCode")
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)] %>%
replace(is.na(.), 0) %>%
replace(., . > 0, 1)
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
surv$Release <- factor(surv$Release, levels = groups)
inp[,groups] <- 0
for (i in groups) {
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first; if the first release in factor order
# has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1,
rel = factor(inp$Release, levels = names(sort(table(surv$Release),decreasing = T))),
stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", names(sort(table(surv$Release),decreasing = T))), WR.surv)
}
if(length(intersect(colnames(inp),groups)) < 2){
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""), " ", 1,sep = "")
write.table(inp$inp_final,"WRinp.inp",row.names = F, col.names = F, quote = F)
WRinp <- convert.inp("WRinp.inp")
WR.process <- process.data(WRinp, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.mark.rel <- mark(WR.process, WR.ddl,
model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1)
WR.surv <- rbind(WR.surv, round(WR.mark.rel$results$real[seq(from=1,to=length(groups)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv <- cbind(c("ALL", groups), WR.surv)
}
colnames(WR.surv)[1] <- "Release"
WR.surv <- merge(WR.surv, travel_final, by = "Release", all.x = T)
WR.surv$mean_travel_time <- round(WR.surv$mean_travel_time,1)
WR.surv$sd_travel_time <- round(WR.surv$sd_travel_time,1)
colnames(WR.surv) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)", "Mean time to Tower (days)", "SD of time to Tower (days)","Count")
WR.surv <- WR.surv %>% arrange(., Release)
print(kable(WR.surv, row.names = F, "html", caption = "3.1 Minimum survival to Tower Bridge (using CJS
survival model), and travel time. If Yolo Bypass Weirs are overtopping during migration, fish may have taken
that route, and therefore this is a minimum estimate of survival") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"),
full_width = F, position = "left"))
}
}
3.1 Minimum survival to Tower Bridge (using CJS survival model), and travel time. If Yolo Bypass Weirs are overtopping during migration, fish may have taken that route, and therefore this is a minimum estimate of survival
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Tower (days) | SD of time to Tower (days) | Count |
|---|---|---|---|---|---|---|---|---|
| ALL | 41.3 | 1.4 | 38.6 | 44.1 | 88.2 | 6.2 | 4.5 | 510 |
| Before Pulse | 53.8 | 3.2 | 47.5 | 59.9 | NA | 8.5 | 2.8 | 134 |
| First Pulse | 25.1 | 2.8 | 20.0 | 30.9 | NA | 5.5 | 6.0 | 62 |
| Second Pulse - Week 1 | 34.5 | 3.0 | 28.8 | 40.7 | NA | 7.0 | 6.1 | 85 |
| Second Pulse - Week 2 | 48.6 | 3.2 | 42.3 | 55.0 | NA | 4.8 | 3.8 | 119 |
| Second Pulse - Week 3 | 44.8 | 3.2 | 38.6 | 51.1 | NA | 4.6 | 2.7 | 110 |
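The CJS estimates above are fit to per-fish detection ("capture") histories, which the code builds as strings of 0s and 1s in the inp_final column. A toy illustration of what one history encodes, using hypothetical values:
# Toy example of a CJS capture history as constructed above (sites ordered upstream to downstream).
# The leading "1" is the release occasion; each later digit is detected (1) or not detected (0).
detected_at <- c(TowerBridge = 1, `I80-50_Br` = 0)   # hypothetical single fish
ch <- paste0("1", paste(detected_at, collapse = ""))
ch
# "110" = released, detected at Tower Bridge, not detected at the I80-50 bridge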
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(Delta <- read.csv("Delta_surv.csv", stringsAsFactors = F))
if(nrow(detects_study[is.na(detects_study$DateTime_PST) == F,]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NO DETECTIONS YET", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
test4 <- detects_study[detects_study$general_location %in% c("TowerBridge", "I80-50_Br", "Benicia_west", "Benicia_east"),]
if(nrow(test4[test4$general_location =="Benicia_west",]) == 0 | nrow(test4[test4$general_location =="Benicia_east",]) == 0){
WR.surv1 <- data.frame("Measure"=NA, "Estimate"="NOT ENOUGH DETECTIONS", "SE"=NA, "95% lower C.I."=NA, "95% upper C.I."=NA)
colnames(WR.surv1) <- c("Measure", "Estimate", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
# calculate mean and SD travel time
sac <- test4[test4$general_location %in% c("TowerBridge", "I80-50_Br"),]
ben <- test4[test4$general_location %in% c("Benicia_west", "Benicia_east"),]
travel_sac <- aggregate(list(first_detect_sac = sac$DateTime_PST), by = list(Release = sac$Release, TagCode = sac$TagCode), min)
travel_ben <- aggregate(list(first_detect_ben = ben$DateTime_PST), by = list(Release = ben$Release, TagCode = ben$TagCode), min)
travel <- merge(travel_sac, travel_ben, by = c("Release","TagCode"))
travel$days <- as.numeric(difftime(travel$first_detect_ben, travel$first_detect_sac, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))
inp <- as.data.frame(reshape2::dcast(test4, TagCode ~ general_location, fun.aggregate = length))
# add together detections at Tower and I80 to ensure good detection entering Delta
if("I80-50_Br" %in% colnames(inp) & "TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$`I80-50_Br` + inp$TowerBridge
} else if("TowerBridge" %in% colnames(inp)){
inp$`I80-50_Br` <- inp$TowerBridge
}
# Sort columns by river km in descending order, this also removes TowerBridge, no longer needed
inp <- inp[,c("TagCode","I80-50_Br", "Benicia_east", "Benicia_west")]
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(detects_study[which(detects_study$river_km < 53),"Release"]))
inp[,groups] <- 0
for(i in groups){
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first; if the first release in factor order has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)),
silent = T, output = F)
} else {
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
}
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects,
round(WR.mark.rel$results$real[seq(from=2,to=length(groups_w_detects)*3,by = 3),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
} else {
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)),
silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[2,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
WR.surv1 <- WR.surv
colnames(WR.surv1)[1] <- "Release"
WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Mean Delta passage (days)", "SD of Delta Passage (days)","Count")
#colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.")
print(kable(WR.surv1, row.names = F, "html", caption = "3.2 Minimum through-Delta survival, and travel time: City of Sacramento to Benicia (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
if(exists("Delta")==T & is.numeric(WR.surv1[1,2])){
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
# Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
quality <- "final"}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
Delta$RelDT <- as.POSIXct(Delta$RelDT)
# remove old benicia record for this studyID
Delta <- Delta[!Delta$StudyID %in% unique(detects_study$Study_ID),]
Delta <- rbind(Delta, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(Delta, "Delta_surv.csv", row.names = F, quote = F)
}
}
}
3.2 Minimum through-Delta survival, and travel time: City of Sacramento to Benicia (using CJS survival model)
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Mean Delta passage (days) | SD of Delta passage (days) | Count |
|---|---|---|---|---|---|---|---|
| ALL | 69.1 | 2.0 | 64.9 | 72.9 | 3.4 | 1.3 | 352 |
| Before Pulse | 74.2 | 3.8 | 66.1 | 80.9 | 3.5 | 1.2 | 100 |
| First Pulse | 67.8 | 5.9 | 55.4 | 78.2 | 3.5 | 2.6 | 42 |
| Second Pulse - Week 1 | 75.3 | 4.7 | 65.0 | 83.3 | 3.9 | 1.1 | 64 |
| Second Pulse - Week 2 | 76.2 | 3.9 | 67.7 | 83.0 | 3.1 | 0.7 | 91 |
| Second Pulse - Week 3 | 52.0 | 4.6 | 42.9 | 60.9 | 2.8 | 0.9 | 55 |
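Note that the through-Delta estimate above is conditional on a fish having survived to the City of Sacramento. As a rough consistency check (an interpretive sketch, not part of the report's model output), end-to-end minimum survival to Benicia should be approximately the product of minimum survival to the Sacramento receivers and through-Delta survival: 0.413 (Table 3.1) × 0.691 ≈ 0.285, or about 28.5%, which is close to the 29.4% estimated directly to Benicia in section 3.3 below. Small differences are expected because the two models are fitted separately and use different receiver sets.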
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
try(benicia <- read.csv("benicia_surv.csv", stringsAsFactors = F))
detects_benicia <- detects_study[detects_study$general_location %in% c("Benicia_west", "Benicia_east"),]
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")), max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life))))
if(nrow(detects_benicia) == 0){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=0, "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
} else {
WR.surv <- data.frame("Release"=NA, "estimate"="NO DETECTIONS YET", "se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else if(length(table(detects_benicia$general_location)) == 1){
if(as.numeric(difftime(Sys.time(), min(detects_study$RelDT), units = "days"))>30){
WR.surv <- data.frame("Release"="ALL", "estimate"=round(length(unique(detects_benicia$TagCode))/length(unique(detects_study$TagCode))*100,1),
"se"=NA, "lcl"=NA, "ucl"=NA, "Detection_efficiency"=NA)
} else {
WR.surv <- data.frame("Release" = NA, "estimate" = "NOT ENOUGH DETECTIONS", "se" = NA, "lcl" = NA, "ucl" = NA, "Detection_efficiency" = NA)
}
WR.surv1 <- WR.surv
colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model)") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
# Only do survival to Benicia here
test3 <- detects_study[which(detects_study$river_km < 53),]
# calculate mean and SD travel time
travel <- aggregate(list(first_detect = test3$DateTime_PST), by = list(Release = test3$Release, TagCode = test3$TagCode, RelDT = test3$RelDT), min)
travel$days <- as.numeric(difftime(travel$first_detect, travel$RelDT, units = "days"))
travel_final <- aggregate(list(mean_travel_time = travel$days), by = list(Release = travel$Release), mean)
travel_final <- merge(travel_final, aggregate(list(sd_travel_time = travel$days), by = list(Release = travel$Release), sd))
travel_final <- merge(travel_final, aggregate(list(n = travel$days), by = list(Release = travel$Release), length))
travel_final <- rbind(travel_final, data.frame(Release = "ALL", mean_travel_time = mean(travel$days), sd_travel_time = sd(travel$days), n = nrow(travel)))
# Create inp for survival estimation
inp <- as.data.frame(reshape2::dcast(test3, TagCode ~ river_km, fun.aggregate = length))
# Sort columns by river km in descending order
# Count number of genlocs
gen_loc_sites <- ncol(inp)-1
inp <- inp[,c(1,order(names(inp[,2:(gen_loc_sites+1)]), decreasing = T)+1)]
inp <- merge(study_tagcodes, inp, by = "TagCode", all.x = T)
inp2 <- inp[,(ncol(inp)-gen_loc_sites+1):ncol(inp)]
inp2[is.na(inp2)] <- 0
inp2[inp2 > 0] <- 1
inp <- cbind(inp, inp2)
groups <- as.character(sort(unique(inp$Release)))
groups_w_detects <- names(table(test3$Release))
inp[,groups] <- 0
for(i in groups){
inp[as.character(inp$Release) == i, i] <- 1
}
inp$inp_final <- paste("1",apply(inp2, 1, paste, collapse=""),sep="")
if(length(groups) > 1){
# make sure factor levels have a release that has detections first; if the first release in factor order has zero detections, the model goes haywire
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, rel = inp$Release, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
inp.df <- inp.df[inp.df$rel %in% groups_w_detects,]
inp.df$rel <- factor(inp.df$rel, levels = groups_w_detects)
if(length(groups_w_detects) > 1){
WR.process <- process.data(inp.df, model="CJS", begin.time=1, groups = "rel")
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time*rel),p=list(formula=~time)), silent = T, output = F)
} else {
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.rel <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
}
WR.surv <- cbind(Release = "ALL",round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- cbind(Release = groups_w_detects, round(WR.mark.rel$results$real[seq(from=1,to=length(groups_w_detects)*2,by = 2),
c("estimate", "se", "lcl", "ucl")] * 100,1))
WR.surv.rel <- merge(WR.surv.rel, data.frame(Release = groups), all.y = T)
WR.surv.rel[is.na(WR.surv.rel$estimate),"estimate"] <- 0
WR.surv <- rbind(WR.surv, WR.surv.rel)
} else {
inp.df <- data.frame(ch = as.character(inp$inp_final), freq = 1, stringsAsFactors = F)
WR.process <- process.data(inp.df, model="CJS", begin.time=1)
WR.ddl <- make.design.data(WR.process)
WR.mark.all <- mark(WR.process, WR.ddl, model.parameters=list(Phi=list(formula=~time),p=list(formula=~time)), silent = T, output = F)
WR.surv <- cbind(Release = c("ALL", groups),round(WR.mark.all$results$real[1,c("estimate", "se", "lcl", "ucl")] * 100,1))
}
WR.surv$Detection_efficiency <- NA
WR.surv[1,"Detection_efficiency"] <- round(WR.mark.all$results$real[gen_loc_sites+1,"estimate"] * 100,1)
WR.surv1 <- WR.surv
colnames(WR.surv1)[1] <- "Release"
WR.surv1 <- merge(WR.surv1, travel_final, by = "Release", all.x = T)
WR.surv1$mean_travel_time <- round(WR.surv1$mean_travel_time,1)
WR.surv1$sd_travel_time <- round(WR.surv1$sd_travel_time,1)
colnames(WR.surv1) <- c("Release", "Survival (%)", "SE", "95% lower C.I.",
"95% upper C.I.", "Detection efficiency (%)", "Mean time to Benicia (days)", "SD of time to Benicia (days)", "Count")
#colnames(WR.surv1) <- c("Release Group", "Survival (%)", "SE", "95% lower C.I.", "95% upper C.I.", "Detection efficiency (%)")
print(kable(WR.surv1, row.names = F, "html", caption = "3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
}
3.3 Minimum survival to Benicia Bridge East Span (using CJS survival model), and travel time
| Release | Survival (%) | SE | 95% lower C.I. | 95% upper C.I. | Detection efficiency (%) | Mean time to Benicia (days) | SD of time to Benicia (days) | Count |
|---|---|---|---|---|---|---|---|---|
| ALL | 29.4 | 1.3 | 26.9 | 31.9 | 99.1 | 9.2 | 3.7 | 367 |
| Before Pulse | 40.6 | 3.1 | 34.7 | 46.8 | NA | 11.8 | 1.8 | 102 |
| First Pulse | 17.6 | 2.4 | 13.4 | 22.8 | NA | 8.1 | 3.4 | 44 |
| Second Pulse - Week 1 | 26.4 | 2.8 | 21.3 | 32.2 | NA | 10.3 | 4.8 | 66 |
| Second Pulse - Week 2 | 37.2 | 3.1 | 31.4 | 43.4 | NA | 7.4 | 2.6 | 93 |
| Second Pulse - Week 3 | 24.8 | 2.7 | 19.9 | 30.6 | NA | 7.2 | 3.5 | 62 |
if(exists("benicia")==T & is.numeric(WR.surv1[1,2])){
# Find mean release time per release group, and ALL
reltimes <- aggregate(list(RelDT = study_tagcodes$release_time), by = list(Release = study_tagcodes$Release), FUN = mean)
reltimes <- rbind(reltimes, data.frame(Release = "ALL", RelDT = mean(study_tagcodes$release_time)))
# Assign whether the results are tentative or final
quality <- "tentative"
if(endtime < as.Date(format(Sys.time(), "%Y-%m-%d"))){
quality <- "final"
}
WR.surv <- merge(WR.surv, reltimes, by = "Release", all.x = T)
WR.surv$RelDT <- as.POSIXct(WR.surv$RelDT, origin = "1970-01-01")
benicia$RelDT <- as.POSIXct(benicia$RelDT)
# remove old benicia record for this studyID
benicia <- benicia[!benicia$StudyID == unique(detects_study$Study_ID),]
benicia <- rbind(benicia, data.frame(WR.surv, StudyID = unique(detects_study$Study_ID), data_quality = quality))
write.csv(benicia, "benicia_surv.csv", row.names = F, quote = F)
}
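The chunk above appends this study's Benicia survival estimates to a running benicia_surv.csv file. A minimal sketch of reading that file back and pulling the current study's overall record (column names are taken from the code above; the Study_ID value is whatever detects_study carries):
# Minimal sketch: re-read the cumulative Benicia survival file written above
# and extract this study's "ALL" row. Assumes detects_study is still in memory.
benicia_hist <- read.csv("benicia_surv.csv", stringsAsFactors = FALSE)
subset(benicia_hist, StudyID == unique(detects_study$Study_ID) & Release == "ALL")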
4. Detection statistics at all real-time receivers
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
arrange(TagCode)
tag_stats <- arrivals %>%
group_by(general_location) %>%
summarise(First_arrival = min(DateTime_PST),
Mean_arrival = mean(DateTime_PST),
Last_arrival = max(DateTime_PST),
Fish_count = length(unique(TagCode))) %>%
mutate(Percent_arrived = round(Fish_count/nrow(study_tagcodes) * 100,2)) %>%
dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
arrange(desc(river_km)) %>%
mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
na.omit()
print(kable(tag_stats, row.names = F,
caption = "4.1 Detections for all releases combined",
"html") %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
count <- 0
for(j in sort(unique(study_tagcodes$Release))){
if(nrow(detects_study[detects_study$Release == j,]) > 0){
count <- count + 1
arrivals1 <- detects_study %>%
filter(Release == j) %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
arrange(TagCode)
rel_count <- nrow(study_tagcodes[study_tagcodes$Release == j,])
tag_stats1 <- arrivals1 %>%
group_by(general_location) %>%
summarise(First_arrival = min(DateTime_PST),
Mean_arrival = mean(DateTime_PST),
Last_arrival = max(DateTime_PST),
Fish_count = length(unique(TagCode))) %>%
mutate(Percent_arrived = round(Fish_count/rel_count * 100,2)) %>%
dplyr::left_join(., unique(detects_study[,c("general_location", "river_km")])) %>%
arrange(desc(river_km)) %>%
mutate(First_arrival = format(First_arrival, tz = "Etc/GMT+8"),
Mean_arrival = format(Mean_arrival, tz = "Etc/GMT+8"),
Last_arrival = format(Last_arrival, tz = "Etc/GMT+8")) %>%
na.omit()
final_stats <- kable(tag_stats1, row.names = F,
caption = paste("4.2.", count, " Detections for ", j, " release groups", sep = ""),
"html")
print(kable_styling(final_stats, bootstrap_options = c("striped", "hover", "condensed", "responsive", "bordered"), full_width = F, position = "left"))
} else {
cat("\n\n\\pagebreak\n")
print(paste("No detections for",j,"release group yet", sep=" "), quote = F)
cat("\n\n\\pagebreak\n")
}
}
}
4.1 Detections for all releases combined
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-04-18 10:26:00 | 2023-05-01 11:11:38 | 2023-05-18 05:01:20 | 1017 | 81.29 | 457.000 |
| MeridianBr | 2023-04-20 16:57:28 | 2023-05-07 06:29:09 | 2023-06-01 19:55:48 | 610 | 48.76 | 290.848 |
| TowerBridge | 2023-04-22 11:39:42 | 2023-05-08 16:56:40 | 2023-06-10 08:38:08 | 456 | 36.45 | 172.000 |
| I80-50_Br | 2023-04-22 12:03:41 | 2023-05-08 15:06:55 | 2023-06-10 08:58:06 | 456 | 36.45 | 170.748 |
| Sac_BlwGeorgiana | 2023-04-24 15:54:43 | 2023-05-10 03:31:59 | 2023-06-10 20:22:23 | 313 | 25.02 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-04-24 16:08:25 | 2023-05-10 06:08:12 | 2023-06-10 20:35:34 | 320 | 25.58 | 118.398 |
| Benicia_east | 2023-04-26 15:16:35 | 2023-05-11 06:51:37 | 2023-06-15 03:19:25 | 364 | 29.10 | 52.240 |
| Benicia_west | 2023-04-27 09:50:56 | 2023-05-10 22:08:02 | 2023-06-15 03:25:47 | 340 | 27.18 | 52.040 |
4.2.1 Detections for Before Pulse release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-04-18 10:26:00 | 2023-04-19 07:51:27 | 2023-04-25 14:30:12 | 226 | 90.04 | 457.000 |
| MeridianBr | 2023-04-20 16:57:28 | 2023-04-25 14:45:33 | 2023-05-05 18:23:22 | 145 | 57.77 | 290.848 |
| TowerBridge | 2023-04-22 11:39:42 | 2023-04-27 08:02:27 | 2023-05-22 18:12:20 | 122 | 48.61 | 172.000 |
| I80-50_Br | 2023-04-22 12:03:41 | 2023-04-27 05:57:30 | 2023-05-02 02:16:20 | 127 | 50.60 | 170.748 |
| Sac_BlwGeorgiana | 2023-04-24 15:54:43 | 2023-04-28 01:28:24 | 2023-05-23 17:52:55 | 82 | 32.67 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-04-24 16:08:25 | 2023-04-28 01:40:31 | 2023-05-23 18:05:04 | 82 | 32.67 | 118.398 |
| Benicia_east | 2023-04-26 15:16:35 | 2023-04-30 16:03:51 | 2023-05-11 13:22:23 | 102 | 40.64 | 52.240 |
| Benicia_west | 2023-04-27 09:50:56 | 2023-04-30 16:21:28 | 2023-05-11 13:29:19 | 100 | 39.84 | 52.040 |
4.2.2 Detections for First Pulse release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-04-24 11:16:12 | 2023-04-25 01:45:38 | 2023-04-29 12:30:57 | 246 | 98.4 | 457.000 |
| MeridianBr | 2023-04-26 04:01:50 | 2023-04-28 12:10:07 | 2023-05-28 23:43:45 | 85 | 34.0 | 290.848 |
| TowerBridge | 2023-04-27 19:30:09 | 2023-04-30 09:34:28 | 2023-06-05 00:54:25 | 59 | 23.6 | 172.000 |
| I80-50_Br | 2023-04-27 19:51:43 | 2023-04-30 09:13:00 | 2023-06-05 01:18:05 | 57 | 22.8 | 170.748 |
| Sac_BlwGeorgiana | 2023-04-28 11:01:03 | 2023-05-01 15:11:19 | 2023-06-05 15:27:55 | 36 | 14.4 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-04-28 11:11:56 | 2023-05-01 14:08:32 | 2023-06-05 15:38:02 | 37 | 14.8 | 118.398 |
| Benicia_east | 2023-04-30 13:50:55 | 2023-05-02 20:55:28 | 2023-05-22 06:52:18 | 44 | 17.6 | 52.240 |
| Benicia_west | 2023-04-30 13:58:14 | 2023-05-02 21:07:04 | 2023-05-22 06:55:06 | 44 | 17.6 | 52.040 |
4.2.3 Detections for Second Pulse - Week 1 release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-05-02 10:00:47 | 2023-05-02 22:25:51 | 2023-05-03 14:18:35 | 228 | 91.2 | 457.000 |
| MeridianBr | 2023-05-03 22:52:07 | 2023-05-07 20:30:46 | 2023-05-31 03:53:52 | 109 | 43.6 | 290.848 |
| TowerBridge | 2023-05-05 14:51:21 | 2023-05-09 17:26:35 | 2023-05-31 18:30:26 | 74 | 29.6 | 172.000 |
| I80-50_Br | 2023-05-05 15:12:05 | 2023-05-09 16:55:47 | 2023-05-31 19:06:51 | 76 | 30.4 | 170.748 |
| Sac_BlwGeorgiana | 2023-05-06 06:47:16 | 2023-05-11 13:33:48 | 2023-06-01 07:53:48 | 49 | 19.6 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-05-06 06:58:30 | 2023-05-11 14:53:56 | 2023-06-01 08:01:07 | 52 | 20.8 | 118.398 |
| Benicia_east | 2023-05-09 09:22:04 | 2023-05-13 05:12:46 | 2023-06-02 17:16:24 | 64 | 25.6 | 52.240 |
| Benicia_west | 2023-05-09 07:03:39 | 2023-05-13 08:38:44 | 2023-06-02 17:23:34 | 58 | 23.2 | 52.040 |
4.2.4 Detections for Second Pulse - Week 2 release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-05-09 10:06:23 | 2023-05-10 02:06:06 | 2023-05-10 12:31:40 | 135 | 54.0 | 457.000 |
| MeridianBr | 2023-05-10 19:28:48 | 2023-05-12 19:06:31 | 2023-06-01 19:55:48 | 140 | 56.0 | 290.848 |
| TowerBridge | 2023-05-12 05:57:01 | 2023-05-14 16:14:33 | 2023-06-06 05:25:36 | 113 | 45.2 | 172.000 |
| I80-50_Br | 2023-05-12 06:17:59 | 2023-05-14 16:55:33 | 2023-06-06 05:42:59 | 100 | 40.0 | 170.748 |
| Sac_BlwGeorgiana | 2023-05-12 22:50:27 | 2023-05-15 04:25:49 | 2023-06-06 17:39:31 | 75 | 30.0 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-05-12 23:09:15 | 2023-05-15 04:11:08 | 2023-06-06 17:50:27 | 76 | 30.4 | 118.398 |
| Benicia_east | 2023-05-15 05:12:28 | 2023-05-17 02:32:26 | 2023-05-27 12:18:23 | 92 | 36.8 | 52.240 |
| Benicia_west | 2023-05-15 05:15:45 | 2023-05-17 07:10:13 | 2023-06-02 07:17:26 | 88 | 35.2 | 52.040 |
4.2.5 Detections for Second Pulse - Week 3 release groups
| general_location | First_arrival | Mean_arrival | Last_arrival | Fish_count | Percent_arrived | river_km |
|---|---|---|---|---|---|---|
| Blw_Salt_RT | 2023-05-16 09:16:54 | 2023-05-16 22:43:59 | 2023-05-18 05:01:20 | 182 | 72.8 | 457.000 |
| MeridianBr | 2023-05-17 19:32:07 | 2023-05-19 11:10:02 | 2023-05-26 13:34:00 | 131 | 52.4 | 290.848 |
| TowerBridge | 2023-05-19 07:55:20 | 2023-05-21 12:20:54 | 2023-06-10 08:38:08 | 88 | 35.2 | 172.000 |
| I80-50_Br | 2023-05-19 08:10:06 | 2023-05-21 09:39:33 | 2023-06-10 08:58:06 | 96 | 38.4 | 170.748 |
| Sac_BlwGeorgiana | 2023-05-19 23:33:24 | 2023-05-22 02:56:55 | 2023-06-10 20:22:23 | 71 | 28.4 | 119.058 |
| Sac_BlwGeorgiana2 | 2023-05-19 23:46:41 | 2023-05-22 05:50:21 | 2023-06-10 20:35:34 | 73 | 29.2 | 118.398 |
| Benicia_east | 2023-05-21 09:23:09 | 2023-05-24 02:14:06 | 2023-06-15 03:19:25 | 62 | 24.8 | 52.240 |
| Benicia_west | 2023-05-21 09:27:27 | 2023-05-24 10:19:20 | 2023-06-15 03:25:47 | 50 | 20.0 | 52.040 |
library(dplyr)
library(dbplyr)
library(DBI)
library(odbc)
library(data.table)
# Create connection with cloud database
con <- dbConnect(odbc(),
Driver = "SQL Server",
Server = "calfishtrack-server.database.windows.net",
Database = "realtime_detections",
UID = "realtime_user",
PWD = "Pass@123",
Port = 1433)
try(setwd(paste(file.path(Sys.getenv("USERPROFILE"),"Desktop",fsep="\\"), "\\Real-time data massaging\\products", sep = "")))
# THIS CODE CHUNK WILL NOT WORK IF USING ONLY ERDDAP DATA, REQUIRES ACCESS TO LOCAL FILES
if(nrow(detects_study[is.na(detects_study$DateTime_PST)==F,]) == 0){
"No detections yet"
} else {
arrivals <- detects_study %>%
group_by(general_location, TagCode) %>%
summarise(DateTime_PST = min(DateTime_PST)) %>%
mutate(day = as.Date(DateTime_PST, "%Y-%m-%d", tz = "Etc/GMT+8"))
# Use dbplyr to load realtime_locs and qryHexCodes sql table
gen_locs <- tbl(con, "realtime_locs") %>% collect()
# gen_locs <- read.csv("realtime_locs.csv", stringsAsFactors = F)
beacon_by_day <- fread("beacon_by_day.csv", stringsAsFactors = F) %>%
mutate(day = as.Date(day)) %>%
filter(TagCode == beacon) %>% # Now subset to only look at data for the correct beacon for that day
filter(day >= as.Date(min(study_tagcodes$release_time)) &
day <= endtime) %>% # Now only keep beacon by day for days since fish were released
dplyr::left_join(., gen_locs[,c("location", "general_location","rkm")], by = "location")
arrivals_per_day <- arrivals %>%
group_by(day, general_location) %>%
summarise(New_arrivals = length(TagCode)) %>%
arrange(general_location) %>% na.omit() %>%
mutate(day = as.Date(day)) %>%
dplyr::left_join(unique(beacon_by_day[,c("general_location", "day", "rkm")]),
., by = c("general_location", "day")) %>%
arrange(general_location, day) %>%
mutate(day = factor(day)) %>%
filter(general_location != "Bench_test") %>% # Remove bench test and other NA locations
filter(!(is.na(general_location))) %>%
arrange(desc(rkm)) %>% # Change order of data to plot decreasing river_km
mutate(general_location = factor(general_location, unique(general_location)))
endtime <- min(as.Date(format(Sys.time(), "%Y-%m-%d")),
max(as.Date(detects_study$release_time)+(as.numeric(detects_study$tag_life)*1.5)))
crosstab <- xtabs(formula = arrivals_per_day$New_arrivals ~ arrivals_per_day$day + arrivals_per_day$general_location,
addNA =T)
crosstab[is.na(crosstab)] <- ""
crosstab[crosstab==0] <- NA
crosstab <- as.data.frame.matrix(crosstab)
kable(crosstab, align = "c", caption = "4.3 Fish arrivals per day (\"NA\" means receivers were non-operational)") %>%
kable_styling(c("striped", "condensed"), font_size = 11, full_width = F, position = "left", fixed_thead = TRUE) %>%
column_spec(column = 1:ncol(crosstab),width_min = "50px",border_left = T, border_right = T) %>%
column_spec(1, bold = T, width_min = "75px")%>%
scroll_box(height = "700px")
}
4.3 Fish arrivals per day (“NA” means receivers were non-operational)
| Date | Blw_Salt_RT | MeridianBr | TowerBridge | I80-50_Br | MiddleRiver | Clifton_Court_US_Radial_Gates | Holland_Cut_Quimby | CVP_Tank | CVP_Trash_Rack_1 | Clifton_Court_Intake_Canal | Old_River_Quimby | Sac_BlwGeorgiana | Sac_BlwGeorgiana2 | Benicia_east | Benicia_west |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 2023-04-18 | 93 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-19 | 115 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-20 | 10 | 1 |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-21 | 1 | 2 |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-22 |  | 9 | 1 | 1 |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-23 | 1 | 9 | 3 | 3 |  |  |  |  |  |  |  |  |  |  |  |
| 2023-04-24 | 115 | 35 | 8 | 8 |  |  |  |  |  |  |  | 1 | 1 |  |  |
| 2023-04-25 | 129 | 30 | 22 | 21 |  |  |  |  |  |  |  | 11 | 10 |  |  |
| 2023-04-26 | 6 | 43 | 28 | 25 |  |  |  |  |  |  |  | 12 | 13 | 1 |  |
| 2023-04-27 | 1 | 62 | 25 | 30 |  |  |  |  |  |  |  | 27 | 27 | 2 | 3 |
| 2023-04-28 |  | 19 | 46 | 48 |  |  |  |  |  |  |  | 25 | 25 | 13 | 13 |
| 2023-04-29 | 1 | 8 | 30 | 32 |  |  |  |  |  |  |  | 28 | 29 | 19 | 19 |
| 2023-04-30 |  | 5 | 10 | 10 |  |  |  |  |  |  |  | 7 | 7 | 32 | 31 |
| 2023-05-01 |  | 1 | 1 | 1 |  |  |  |  |  |  |  | 1 | 1 | 44 | 43 |
| 2023-05-02 | 115 | 2 | 1 | 1 |  |  |  |  |  |  |  | 1 | 1 | 18 | 18 |
| 2023-05-03 | 113 | 3 | 2 | 1 |  |  |  |  |  |  |  |  |  | 5 | 5 |
| 2023-05-04 |  | 21 |  |  |  |  |  |  |  |  |  | 1 | 1 | 7 | 7 |
| 2023-05-05 |  | 46 | 8 | 7 |  |  |  |  |  |  |  |  |  | 2 | 2 |
| 2023-05-06 |  | 9 | 22 | 22 |  |  |  |  |  |  |  | 7 | 7 |  |  |
| 2023-05-07 |  | 9 | 15 | 16 |  |  |  |  |  |  |  | 11 | 13 |  |  |
| 2023-05-08 |  | 4 | 9 | 8 |  |  |  |  |  |  |  | 11 | 11 |  |  |
| 2023-05-09 | 47 | 1 | 5 | 6 |  |  |  |  |  |  |  | 7 | 7 | 9 | 9 |
| 2023-05-10 | 88 | 17 | 4 | 3 |  |  |  |  |  |  |  | 2 | 2 | 15 | 14 |
| 2023-05-11 |  | 53 |  | 2 |  |  |  |  |  |  |  |  |  | 17 | 14 |
| 2023-05-12 |  | 46 | 37 | 32 |  |  |  |  |  |  |  | 1 | 1 | 4 | 4 |
| 2023-05-13 |  | 12 | 41 | 36 |  |  |  |  |  |  |  | 30 | 31 | 6 | 4 |
| 2023-05-14 |  | 8 | 17 | 14 |  |  |  |  |  |  |  | 28 | 28 | 3 | 4 |
| 2023-05-15 |  | 1 | 5 | 7 |  |  |  |  |  |  |  | 7 | 7 | 28 | 27 |
| 2023-05-16 | 87 | 3 | 6 | 5 |  |  |  |  |  |  |  | 6 | 6 | 37 | 36 |
| 2023-05-17 | 94 | 15 | 1 | 2 |  |  |  |  |  |  |  | 1 | 1 | 14 | 11 |
| 2023-05-18 | 1 | 53 | 3 | 3 |  |  |  |  |  |  |  | 3 | 3 | 8 | 7 |
| 2023-05-19 |  | 43 | 21 | 19 |  |  |  |  |  |  |  | 3 | 3 | 4 | 4 |
| 2023-05-20 |  | 9 | 42 | 42 |  |  |  |  |  |  |  | 29 | 27 | 4 | 4 |
| 2023-05-21 |  | 8 | 11 | 24 |  |  |  |  |  |  |  | 27 | 27 | 4 | 3 |
| 2023-05-22 |  | 2 | 8 | 7 |  |  |  |  |  |  |  | 3 | 5 | 21 | 17 |
| 2023-05-23 |  | 1 | 2 | 3 |  |  |  |  |  |  |  | 4 | 5 | 27 | 20 |
| 2023-05-24 |  | 5 | 2 | 2 |  |  |  |  |  |  |  | 2 | 4 | 7 | 7 |
| 2023-05-25 |  | 5 | 5 | 5 |  |  |  |  |  |  |  | 1 | 1 | 4 | 4 |
| 2023-05-26 |  | 1 | 1 | 1 |  |  |  |  |  |  |  | 3 | 3 |  |  |
| 2023-05-27 |  |  | 2 | 1 |  |  |  |  |  |  |  | 1 | 1 | 2 | 1 |
| 2023-05-28 |  | 6 | 3 | 1 |  |  |  |  |  |  |  | 1 | 1 |  | 1 |
| 2023-05-29 |  | 1 |  |  |  |  |  |  |  |  |  | 2 | 2 | 2 | 1 |
| 2023-05-30 |  |  | 2 | 1 |  |  |  |  |  |  |  | 3 | 3 | 1 | 1 |
| 2023-05-31 |  | 1 | 4 | 3 |  |  |  |  |  |  |  | 2 | 2 | 2 | 3 |
| 2023-06-01 |  | 1 |  |  |  |  |  |  |  |  |  | 1 | 1 |  |  |
| 2023-06-02 |  |  |  |  |  |  |  |  |  |  |  |  |  | 1 | 2 |
| 2023-06-03 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-04 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-05 |  |  | 1 | 1 |  |  |  |  |  |  |  | 1 | 1 |  |  |
| 2023-06-06 |  |  | 1 | 1 |  |  |  |  |  |  |  | 1 | 1 |  |  |
| 2023-06-07 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-08 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-09 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-10 |  |  | 1 | 1 |  |  |  |  |  |  |  | 1 | 1 |  |  |
| 2023-06-11 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-12 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-13 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-14 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-15 |  |  |  |  |  |  |  |  |  |  |  |  |  | 1 | 1 |
| 2023-06-16 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-17 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-18 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-19 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-20 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-21 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-22 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-23 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-24 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-25 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-26 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-27 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-28 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-29 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-06-30 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-01 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-02 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-03 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-04 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-05 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-06 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-07 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-08 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-09 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-10 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-11 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-12 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-13 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-14 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-15 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-16 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-17 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-18 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-19 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-20 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-21 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-22 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-23 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-24 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-25 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-26 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
| 2023-07-27 |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
rm(list = ls())
cleanup(ask = F)
For questions or comments, please contact cyril.michel@noaa.gov