This is the confidence that Rust's match gives me

shenjack 2024-07-20 23:34:20 +08:00
parent 72d29d3772
commit 6c1a914b6a
Signed by: shenjack
GPG Key ID: 7B1134A979775551
3 changed files with 54 additions and 29 deletions

View File

@@ -1,7 +1,7 @@
 use blake3::Hasher;
 use sea_orm::{
-    ActiveModelTrait, ConnectOptions, Database, DatabaseConnection, EntityTrait,
-    IntoActiveModel, ModelTrait, QueryOrder, QuerySelect, TransactionTrait,
+    ActiveModelTrait, ConnectOptions, Database, DatabaseConnection, EntityTrait, IntoActiveModel,
+    ModelTrait, QueryOrder, QuerySelect, TransactionTrait,
 };
 use tracing::{event, Level};
@@ -59,19 +59,21 @@ pub enum CoverStrategy {
 /// Returns Err on failure
 /// Returns Ok(true) on success
 /// If the data already exists, the return value depends on the strategy
-pub async fn save_data_to_db<D>(
+pub async fn save_data_to_db<T, D>(
     save_id: SaveId,
-    save_type: SaveType,
+    save_type: T,
     data: D,
     cover_strategy: Option<CoverStrategy>,
     db: &DatabaseConnection,
 ) -> anyhow::Result<bool>
 where
     D: Into<String>,
+    T: Into<SaveType>,
 {
     // Before doing anything, check whether the data already exists
     // If it already exists, handle it according to the strategy
     let cover_strategy = cover_strategy.unwrap_or_default();
+    let save_type: SaveType = save_type.into();
     let exitst_data: Option<model::main_data::Model> = {
         model::main_data::Entity::find_by_id(save_id as i32)
             .select_only()
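
Side note (not part of the commit): the point of the new T: Into<SaveType> bound is that a caller can pass either a SaveType value directly or any type that converts into one, and the function normalizes it once at the top via save_type.into(). Below is a minimal, synchronous sketch of that shape; SaveType here is a simplified stand-in for the sea-orm generated enum, and ShipFile is a made-up caller type.

// Sketch only: simplified stand-ins, not the crate's real types.
#[derive(Debug)]
enum SaveType {
    Save,
    Ship,
    None,
}

// Same bound shape as the new save_data_to_db<T, D>: convert once at the top,
// then work with the concrete enum for the rest of the function.
fn save_data_to_db<T, D>(save_type: T, data: D) -> String
where
    T: Into<SaveType>,
    D: Into<String>,
{
    let save_type: SaveType = save_type.into();
    format!("{:?}: {} bytes", save_type, data.into().len())
}

// Hypothetical caller-side type that knows which SaveType it represents.
struct ShipFile;
impl From<ShipFile> for SaveType {
    fn from(_: ShipFile) -> Self {
        SaveType::Ship
    }
}

fn main() {
    println!("{}", save_data_to_db(SaveType::None, ""));  // pass the enum directly
    println!("{}", save_data_to_db(ShipFile, "<ship/>")); // or anything Into<SaveType>
}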

View File

@@ -1,32 +1,32 @@
+use futures::future::select_all;
 use std::{ops::Range, path::Path};
 use tracing::{event, Level};
-use futures::{future::select_all, task};

 mod config;
-mod db;
+mod db_part;
 mod model;
 mod net;

 pub type SaveId = u32;
 pub const TEXT_DATA_MAX_LEN: usize = 1024;

-use net::DownloadFile;
+use model::sea_orm_active_enums::SaveType;

 async fn big_worker(db: sea_orm::DatabaseConnection, work_range: Range<SaveId>) {
     let client = net::Downloader::default();
     for work_id in work_range {
-        match client.try_download_as_any(work_id).await {
+        match match client.try_download_as_any(work_id).await {
             Some(file) => {
-                match file {
-                    DownloadFile::Save(data) => {
-                    },
-                    DownloadFile::Ship(data) => {
-                    },
-                }
+                let save_type = (&file).into();
+                db_part::save_data_to_db(work_id, save_type, file.take_data(), None, &db)
             }
-        };
+            None => db_part::save_data_to_db(work_id, SaveType::None, "".to_string(), None, &db),
+        }
+        .await
+        {
+            Ok(_) => event!(Level::INFO, "Save data {} success", work_id),
+            Err(e) => event!(Level::WARN, "Save data {} failed: {:?}", work_id, e),
+        }
     }
 }
@@ -43,14 +43,14 @@ async fn main() -> anyhow::Result<()> {
         }
     };
-    let db_connect = db::connect(&conf).await?;
-    let start_id = db::find_max_id(&db_connect).await;
+    let db_connect = db_part::connect(&conf).await?;
+    let start_id = db_part::find_max_id(&db_connect).await;
     event!(Level::INFO, "Starting download from save_id: {}", start_id);
     // 1321469 end
     let end_id: SaveId = 1321469;
     let mut current_id = start_id;
     let batch_size = 100;
@@ -59,19 +59,30 @@ async fn main() -> anyhow::Result<()> {
     let max_works = 10;
     for _ in 0..10 {
         let end = current_id + batch_size;
-        works.push(tokio::spawn(big_worker(db_connect.clone(), current_id..end)));
+        works.push(tokio::spawn(big_worker(
+            db_connect.clone(),
+            current_id..end,
+        )));
         current_id = end;
     }
     while current_id < end_id || !works.is_empty() {
         while current_id < end_id && works.len() < max_works {
             let end = current_id + batch_size;
-            works.push(tokio::spawn(big_worker(db_connect.clone(), current_id..end)));
+            works.push(tokio::spawn(big_worker(
+                db_connect.clone(),
+                current_id..end,
+            )));
             current_id = end;
         }
         if !works.is_empty() {
             let (result, index, remain) = select_all(works).await;
-            event!(Level::INFO, "worker {} finish with result: {:?}", index, result);
+            event!(
+                Level::INFO,
+                "worker {} finish with result: {:?}",
+                index,
+                result
+            );
             works = remain;
         }
     }
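
Side note (not part of the commit): the match match ... { ... }.await { ... } shape in big_worker works because both arms of the inner match evaluate to the same call (a future returned by db_part::save_data_to_db), so the outer match can await it once and handle Ok/Err in a single place. A tiny synchronous sketch of the same shape, with made-up names and a plain Result instead of a future:

// Sketch only: stand-in for a fallible save operation.
fn store(id: u32, payload: &str) -> Result<(), String> {
    if payload.is_empty() {
        return Err(format!("nothing to store for {id}"));
    }
    Ok(())
}

fn main() {
    for (id, download) in [(1_u32, Some("save data")), (2, None)] {
        // Inner match decides what to store; both arms yield the same Result type.
        // Outer match then handles success/failure exactly once.
        match match download {
            Some(data) => store(id, data),
            None => store(id, ""),
        } {
            Ok(_) => println!("save {id} ok"),
            Err(e) => println!("save {id} failed: {e}"),
        }
    }
}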

View File

@@ -1,7 +1,7 @@
 use reqwest::Client;
 use std::time::Duration;

-use crate::SaveId;
+use crate::{model::sea_orm_active_enums::SaveType, SaveId};

 pub struct Downloader {
     pub client: Client,
@@ -23,20 +23,32 @@ impl DownloadFile {
             _ => None,
         }
     }

     pub fn as_save(&self) -> Option<&str> {
         match self {
             DownloadFile::Save(s) => Some(s),
             _ => None,
         }
     }

     pub fn is_ship(&self) -> bool {
-        self.as_ship().is_some()
+        matches!(self, DownloadFile::Ship(_))
     }

     pub fn is_save(&self) -> bool {
-        self.as_save().is_some()
+        matches!(self, DownloadFile::Save(_))
+    }
+
+    pub fn take_data(self) -> String {
+        match self {
+            DownloadFile::Ship(s) => s,
+            DownloadFile::Save(s) => s,
+        }
+    }
+}
+
+impl From<&DownloadFile> for SaveType {
+    fn from(file: &DownloadFile) -> Self {
+        match file {
+            DownloadFile::Ship(_) => SaveType::Ship,
+            DownloadFile::Save(_) => SaveType::Save,
+        }
     }
 }
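
Side note (not part of the commit): matches!(self, DownloadFile::Ship(_)) is the pattern-only equivalent of the old self.as_ship().is_some(), take_data(self) moves the inner String out by value instead of borrowing it, and the From<&DownloadFile> impl is what lets big_worker write (&file).into() to get a SaveType. A compilable sketch with the same shapes (simplified enums, not the crate's real definitions):

#[derive(Debug, PartialEq)]
enum SaveType {
    Save,
    Ship,
}

enum DownloadFile {
    Save(String),
    Ship(String),
}

impl DownloadFile {
    // matches! checks the variant directly, no Option round-trip needed.
    fn is_ship(&self) -> bool {
        matches!(self, DownloadFile::Ship(_))
    }
    // Consumes self and returns the owned payload, whichever variant it is.
    fn take_data(self) -> String {
        match self {
            DownloadFile::Save(s) | DownloadFile::Ship(s) => s,
        }
    }
}

impl From<&DownloadFile> for SaveType {
    fn from(file: &DownloadFile) -> Self {
        match file {
            DownloadFile::Ship(_) => SaveType::Ship,
            DownloadFile::Save(_) => SaveType::Save,
        }
    }
}

fn main() {
    let file = DownloadFile::Ship("<ship/>".to_string());
    assert!(file.is_ship());
    let save_type: SaveType = (&file).into();
    assert_eq!(save_type, SaveType::Ship);
    println!("{:?}: {}", save_type, file.take_data()); // take_data consumes file
}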