Mirror of https://github.com/house-of-vanity/mus-fuse.git, synced 2025-07-06 21:24:09 +00:00.
Cache reworked. Auth implemented. Metrics exposed.
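This commit makes three changes to mus-fuse. The per-file head cache is rekeyed by inode (a HashMap for data plus a HashSet index) and evicted by file count instead of bytes, with the cached head shrunk from 1 MiB to 768 KiB. HTTP Basic auth credentials are read from the HTTP_USER and HTTP_PASS environment variables and attached to every request. Runtime counters (HTTP requests, ingress bytes, cache hits and misses) are exposed through a virtual METRICS.TXT file in the mount root, println! debugging is replaced with env_logger-based logging, and the mount switches from async_read to sync_read.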
.gitignore (vendored), 1 line changed

@@ -1,3 +1,4 @@
 /target
 Cargo.lock
 /mnt
+.vscode/
Cargo.toml, 16 lines changed

@@ -1,6 +1,6 @@
 [package]
 name = "musfuse"
-version = "0.1.0"
+version = "0.4.0"
 authors = ["AB <ultradesu@hexor.ru>"]
 edition = "2018"

@@ -10,8 +10,12 @@ reqwest = { version = "0.10", features = ["json", "blocking"] }
 tokio = { version = "0.2", features = ["full"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-percent-encoding = "*"
-fuse = "*"
-time = "0.1"
-libc = "*"
-rustc-serialize = "*"
+percent-encoding = "2.1.0"
+fuse = "0.3.1"
+time = "0.1.42"
+libc = "0.2.69"
+chrono = "0.4.11"
+env_logger = "0.7.1"
+log = { version = "^0.4.5", features = ["std"] }
+size_format = "1.0.2"
+base64 = "0.12.0"
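All wildcard ("*") version requirements are pinned to concrete releases, rustc-serialize is dropped, and five dependencies are added: chrono, env_logger and log for timestamped logging, size_format for human-readable byte counts, and base64 for encoding the HTTP Basic auth token.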
src/main.rs, 370 lines changed

@@ -1,4 +1,6 @@
 // Fuse staff
+
+extern crate base64;
 extern crate fuse;
 extern crate libc;
 extern crate time;
@@ -9,17 +11,62 @@ use libc::ENOENT;
 use reqwest::blocking::Client;
 use reqwest::blocking::Response;
 use reqwest::header::CONTENT_LENGTH;
-use std::collections::BTreeMap;
+use size_format::SizeFormatterBinary;
+use std::collections::{BTreeMap, HashMap, HashSet};
 use std::env;
 use std::ffi::OsStr;
+use std::fmt;
 use time::Timespec;
-//use http::Method;
+#[macro_use]
+extern crate log;
+extern crate chrono;
+extern crate env_logger;
+
+use chrono::Local;
+use env_logger::Builder;
+use log::LevelFilter;
+use std::io::Write;

 // Download lib staff
 use percent_encoding::percent_decode_str;
 use serde::Deserialize;
 use std::path::Path;

+struct Metrics {
+    http_requests: u64,
+    ingress: u64,
+    hit_len_cache: u64,
+    hit_data_cache: u64,
+    miss_len_cache: u64,
+    miss_data_cache: u64,
+    server_addr: String,
+}
+
+impl fmt::Debug for Metrics {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f,
+            "http_requests: {}\ningress: {}\nhit_len_cache: {}\nhit_data_cache: {}\nmiss_len_cache: {}\nmiss_data_cache: {}\nserver_addr: {}\n",
+            self.http_requests,
+            self.ingress,
+            self.hit_len_cache,
+            self.hit_data_cache,
+            self.miss_len_cache,
+            self.miss_data_cache,
+            self.server_addr,
+        )
+    }
+}
+
+static mut metrics: Metrics = Metrics {
+    http_requests: 0,
+    ingress: 0,
+    hit_len_cache: 0,
+    hit_data_cache: 0,
+    miss_len_cache: 0,
+    miss_data_cache: 0,
+    server_addr: String::new(),
+};
+
 #[derive(Default, Debug, Clone, PartialEq, Deserialize)]
 pub struct Track {
     pub id: Option<String>,
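The hand-written Debug impl above defines exactly what a reader of METRICS.TXT sees: one "name: value" pair per line. An illustrative rendering (the counter values below are made up):

    http_requests: 42
    ingress: 7864320
    hit_len_cache: 128
    hit_data_cache: 96
    miss_len_cache: 14
    miss_data_cache: 12
    server_addr: https://music.example.com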
@@ -34,8 +81,9 @@ pub struct Track {
     pub Size: Option<i64>,
 }

-const CACHE_HEAD: i64 = 1024 * 1024;
-const MAX_CACHE_SIZE: i64 = 10 * 1024 * 1025; // Mb
+const CACHE_HEAD: i64 = 768 * 1024;
+const MAX_CACHE_SIZE: i64 = 10; // Count
+static mut http_auth: String = String::new();

 fn get_basename(path: Option<&String>) -> Option<String> {
     let base = match percent_decode_str(path.unwrap().as_str()).decode_utf8() {
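With these constants the worst-case head-cache footprint is bounded by MAX_CACHE_SIZE * CACHE_HEAD = 10 * 768 KiB = 7.5 MiB. The old byte-based limit it replaces also carried a 1025-for-1024 typo in its 10 * 1024 * 1025 expression.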
@@ -54,12 +102,18 @@ fn get_basename(path: Option<&String>) -> Option<String> {

 #[tokio::main]
 async fn get_tracks(server: &String) -> Result<Vec<Track>, Box<dyn std::error::Error>> {
-    let resp = reqwest::get(format!("{}/songs", server).as_str())
-        .await?
-        .json::<Vec<Track>>()
-        .await?;
-    println!("Found {} tracks.", resp.len());
-    Ok(resp)
+    let client = reqwest::Client::new();
+    unsafe {
+        let resp = client
+            .get(format!("{}/songs", server).as_str())
+            .header("Authorization", format!("Basic {}", http_auth))
+            .send()
+            .await?
+            .json::<Vec<Track>>()
+            .await?;
+        info!("Found {} tracks.", resp.len());
+        Ok(resp)
+    }
 }

 #[cfg(target_family = "unix")]
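get_tracks now authenticates against the /songs endpoint and deserializes its JSON response into Vec<Track>. Field names follow the struct above, including the capitalized Size; the record below is a hypothetical illustration of the expected shape, not real data:

    [
      {
        "id": "5e83fdfa7b9a2c0001d4c2b1",
        "path": "/Artist%20-%20Track.flac",
        "Size": 31457280
      }
    ]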
@@ -68,8 +122,10 @@ struct JsonFilesystem {
     tree: Vec<Track>,
     attrs: BTreeMap<u64, FileAttr>,
     inodes: BTreeMap<String, u64>,
-    buffer_head: BTreeMap<String, Vec<u8>>,
+    buffer_head_index: HashSet<u64>,
+    buffer_head_data: HashMap<u64, Vec<u8>>,
     buffer_length: BTreeMap<String, i64>,
+    metrics_inode: u64,
 }

 #[cfg(target_family = "unix")]
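The reworked cache is split into buffer_head_data (inode to cached head bytes) and buffer_head_index (the set of cached inodes, used to pick an eviction victim). A minimal standalone sketch of the same count-bounded idea, simplified rather than the exact code from this commit:

    use std::collections::{HashMap, HashSet};

    // Insert a file head into the cache, evicting one entry first if the
    // cache already holds `max_entries` heads.
    fn cache_insert(
        data: &mut HashMap<u64, Vec<u8>>,
        index: &mut HashSet<u64>,
        max_entries: usize,
        ino: u64,
        head: Vec<u8>,
    ) {
        if index.len() >= max_entries {
            // HashSet iteration order is unspecified, so this drops an
            // arbitrary entry (not LRU), skipping the inode being read.
            let victim = index.iter().find(|&&x| x != ino).copied();
            if let Some(victim) = victim {
                data.remove(&victim);
                index.remove(&victim);
            }
        }
        data.insert(ino, head);
        index.insert(ino);
    }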
@@ -78,6 +134,7 @@ impl JsonFilesystem {
         let mut attrs = BTreeMap::new();
         let mut inodes = BTreeMap::new();
         let ts = time::now().to_timespec();
+        let mut total_size: i64 = 0;
         let attr = FileAttr {
             ino: 1,
             size: 0,
@@ -96,26 +153,17 @@ impl JsonFilesystem {
         };
         attrs.insert(1, attr);
         inodes.insert("/".to_string(), 1);
-        let client = Client::new();
-        let mut resp: Response;
         for (i, track) in tree.iter().enumerate() {
             let basename = get_basename(track.path.as_ref()).unwrap().to_string();
-            /*
-            let full_url = format!("{}/{}", server, track.path.as_ref().unwrap().to_string());
-            resp = client.head(full_url.as_str()).send().unwrap();
-            let content_length = resp
-                .headers()
-                .get(CONTENT_LENGTH)
-                .unwrap()
-                .to_str()
-                .unwrap()
-                .parse::<u64>()
-                .unwrap();
-            println!("{} len is {}", basename, content_length);
-            */
+            debug!(
+                "Added inode: {} - {} [{}]",
+                i + 2,
+                basename,
+                track.Size.unwrap()
+            );
+            total_size = total_size + track.Size.unwrap();
             let attr = FileAttr {
                 ino: i as u64 + 2,
-                //size: 1024 * 1024 * 1024 as u64,
                 size: track.Size.unwrap() as u64,
                 blocks: 0,
                 atime: ts,
@@ -133,13 +181,41 @@ impl JsonFilesystem {
             attrs.insert(attr.ino, attr);
             inodes.insert(basename.clone(), attr.ino);
         }
+        // Metrics file
+        let metrics_inode = 2 + tree.len() as u64;
+        let metrics_attr = FileAttr {
+            ino: metrics_inode,
+            size: 4096,
+            blocks: 0,
+            atime: ts,
+            mtime: ts,
+            ctime: ts,
+            crtime: ts,
+            kind: FileType::RegularFile,
+            perm: 0o444,
+            nlink: 0,
+            uid: 0,
+            gid: 0,
+            rdev: 0,
+            flags: 0,
+        };
+        attrs.insert(metrics_attr.ino, metrics_attr);
+        inodes.insert("METRICS.TXT".to_string(), metrics_attr.ino);
+        warn!("Len: attrs: {}, ino: {}", attrs.len(), inodes.len());
+        info!(
+            "Filesystem initialized. Size: {} files, {}B in total.",
+            inodes.len(),
+            (SizeFormatterBinary::new(total_size as u64))
+        );
         JsonFilesystem {
             server: server,
             tree: tree.clone(),
             attrs: attrs,
             inodes: inodes,
-            buffer_head: BTreeMap::new(),
+            buffer_head_data: HashMap::new(),
+            buffer_head_index: HashSet::new(),
             buffer_length: BTreeMap::new(),
+            metrics_inode: metrics_inode,
         }
     }
 }
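The metrics file takes the inode right after the last track (2 + tree.len()), a fixed 4096-byte size, and read-only 0o444 permissions, so the counters can be read with any ordinary file tool, e.g. cat <mountpoint>/METRICS.TXT. The advertised size is cosmetic; the read() handler below serves the real content.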
@@ -147,7 +223,7 @@ impl JsonFilesystem {
 #[cfg(target_family = "unix")]
 impl Filesystem for JsonFilesystem {
     fn getattr(&mut self, _req: &Request, ino: u64, reply: ReplyAttr) {
-        //println!("getattr(ino={})", ino);
+        debug!("getattr(ino={})", ino);
         match self.attrs.get(&ino) {
             Some(attr) => {
                 let ttl = Timespec::new(1, 0);
@@ -158,7 +234,7 @@ impl Filesystem for JsonFilesystem {
     }

     fn lookup(&mut self, _req: &Request, parent: u64, name: &OsStr, reply: ReplyEntry) {
-        //println!("lookup(parent={}, name={})", parent, name.to_str().unwrap());
+        debug!("lookup(parent={}, name={})", parent, name.to_str().unwrap());
         let inode = match self.inodes.get(name.to_str().unwrap()) {
             Some(inode) => inode,
             None => {
@@ -169,6 +245,7 @@ impl Filesystem for JsonFilesystem {
         match self.attrs.get(inode) {
             Some(attr) => {
                 let ttl = Timespec::new(1, 0);
+                debug!("{:#?}", attr);
                 reply.entry(&ttl, attr, 0);
             }
             None => reply.error(ENOENT),
@@ -184,14 +261,37 @@ impl Filesystem for JsonFilesystem {
         size: u32,
         reply: ReplyData,
     ) {
-        print!(
-            "read(ino={}, fh={}, offset={}, size={}) ",
-            ino, fh, offset, size
-        );
+        // return usage statistics
+        if ino == self.metrics_inode {
+            unsafe {
+                let metrics_str = format!("{:#?}", metrics);
+                reply.data(&metrics_str.as_bytes());
+            }
+            return;
+        }
+
+        // cleaning cache
+        if self.buffer_head_index.len() > MAX_CACHE_SIZE as usize {
+            let mut iter = self.buffer_head_index.iter().filter(|&x| *x != ino);
+            let old_entry = iter.next().unwrap();
+            self.buffer_head_data.remove(old_entry);
+            let old_entry_copy = old_entry.clone();
+            self.buffer_head_index.remove(&old_entry_copy);
+            let basename = &self.tree[(ino - 2) as usize].path.as_ref();
+            debug!(
+                "{} - Cache dropped for: {} ",
+                ino,
+                get_basename(*basename).unwrap().to_string()
+            );
+        }
+        debug!(
+            "{} - read(ino={}, fh={}, offset={}, size={}) ",
+            ino, ino, fh, offset, size
+        );

         let url = &self.tree[(ino - 2) as usize].path.as_ref().unwrap();
         let id = &self.tree[(ino - 2) as usize].id.as_ref().unwrap();
-        let full_url = format!("{}/{}", self.server, url);
+        let full_url = format!("{}{}", self.server, url);
         let mut chunk: Vec<u8>;
         let content_length: i64;
         let client = Client::new();
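Two things now happen before any HTTP work in read(): a read of the metrics inode short-circuits with the formatted Metrics struct, and the head cache is trimmed once it exceeds MAX_CACHE_SIZE entries. Because the victim comes from HashSet iteration, an arbitrary cached head is dropped rather than the least recently used one, though the inode currently being read is never chosen. The URL join also changes from "{}/{}" to "{}{}", presumably because track paths already carry a leading slash.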
@@ -200,9 +300,18 @@ impl Filesystem for JsonFilesystem {
         // content_length cache.
         if self.buffer_length.contains_key(id.as_str()) {
             content_length = self.buffer_length[id.as_str()];
-            print!("Hit LC ");
+            debug!("{} - Hit length cache", ino);
+            unsafe {
+                metrics.hit_len_cache += 1;
+            }
         } else {
-            resp = client.head(full_url.as_str()).send().unwrap();
+            unsafe {
+                resp = client
+                    .head(full_url.as_str())
+                    .header("Authorization", format!("Basic {}", http_auth))
+                    .send()
+                    .unwrap();
+            }
             content_length = resp
                 .headers()
                 .get(CONTENT_LENGTH)
@@ -211,14 +320,18 @@ impl Filesystem for JsonFilesystem {
                 .unwrap()
                 .parse::<i64>()
                 .unwrap();
+            unsafe {
+                metrics.http_requests += 1;
+            }
             self.buffer_length.insert(id.to_string(), content_length);
-            print!("Miss LC ");
+            debug!("{} - Miss length cache", ino);
+            unsafe {
+                metrics.miss_len_cache += 1;
+            }
         }
-        print!("LC: {} ", self.buffer_length.len());
-        print!("HC: {} ", self.buffer_head.len());
+        // Check for API wrong file size here

         if content_length > offset {
-            print!("Content len {:?} ", content_length);
+            debug!("{} - Content len {:?} ", ino, content_length);
             let end_of_chunk = if size - 1 + offset as u32 > content_length as u32 {
                 content_length
             } else {
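Content lengths are learned with an authenticated HEAD request and memoized per track id in buffer_length, so repeated reads of the same file cost no extra round-trips. Every hit and miss also bumps the corresponding counter inside an unsafe block, the price of keeping Metrics in a static mut.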
@@ -228,66 +341,84 @@ impl Filesystem for JsonFilesystem {

             // if it's beginning of file...
             if end_of_chunk < CACHE_HEAD {
-                // cleaning cache before. it should be less than MAX_CACHE_SIZE bytes
-                if self.buffer_head.len() as i64 * CACHE_HEAD > MAX_CACHE_SIZE {
-                    let (key, _) = self.buffer_head.iter_mut().next().unwrap();
-                    let key_cpy: String = key.to_string();
-                    if *key == key_cpy {
-                        self.buffer_head.remove(&key_cpy);
-                        print!(" *Cache Cleaned* ");
-                    }
-                }
-
                 // looking for CACHE_HEAD bytes file beginning in cache
-                if self.buffer_head.contains_key(id.as_str()) {
-                    print!("Hit head cache! ");
-                    chunk = self.buffer_head[id.as_str()][offset as usize..end_of_chunk as usize]
+                if self.buffer_head_data.contains_key(&ino) {
+                    // Cache found
+                    debug!("{} - Hit data cache", ino);
+                    unsafe {
+                        metrics.hit_data_cache += 1;
+                    }
+                    chunk = self.buffer_head_data[&ino][offset as usize..end_of_chunk as usize]
                         .to_vec()
                         .clone();
                     reply.data(&chunk);
                 } else {
-                    print!("Miss head cache! ");
-                    resp = client
-                        .get(full_url.as_str())
-                        .header(
-                            "Range",
-                            format!(
-                                "bytes=0-{}",
-                                if CACHE_HEAD > content_length {
-                                    content_length - 1
-                                } else {
-                                    CACHE_HEAD - 1
-                                }
-                            ),
-                        )
-                        .send()
-                        .unwrap();
+                    // Cache doesn't found
+                    debug!("{} - Miss data cache", ino);
+                    unsafe {
+                        metrics.miss_data_cache += 1;
+                    }
+                    // Fetch file head (CACHE_HEAD)
+                    unsafe {
+                        resp = client
+                            .get(full_url.as_str())
+                            .header(
+                                "Range",
+                                format!(
+                                    "bytes=0-{}",
+                                    if CACHE_HEAD > content_length {
+                                        content_length - 1
+                                    } else {
+                                        CACHE_HEAD - 1
+                                    }
+                                ),
+                            )
+                            .header("Authorization", format!("Basic {}", http_auth))
+                            .send()
+                            .unwrap();
+                    }
                     let response = resp.bytes().unwrap();
-                    self.buffer_head.insert(id.to_string(), response.to_vec());
+                    unsafe {
+                        metrics.http_requests += 1;
+                        metrics.ingress += response.len() as u64;
+                    }
+                    // Save cache
+                    self.buffer_head_data.insert(ino, response.to_vec());
+                    self.buffer_head_index.insert(ino);
                     chunk = response[offset as usize..end_of_chunk as usize].to_vec();
                     reply.data(&chunk);
                 }
-                println!("Chunk len: {:?} ", chunk.len());
+                debug!("{} - Chunk len: {:?} ", ino, chunk.len());
                 return;
             }
-            resp = client
-                .get(full_url.as_str())
-                .header("Range", &range)
-                .send()
-                .unwrap();
-            let test = resp.bytes().unwrap();
-            chunk = test.to_vec().clone();
+            // If it isn't a beginning of file don't cache it and fetch over HTTP directly.
+            unsafe {
+                resp = client
+                    .get(full_url.as_str())
+                    .header("Range", &range)
+                    .header("Authorization", format!("Basic {}", http_auth))
+                    .send()
+                    .unwrap();
+            }
+            let response = resp.bytes().unwrap();
+            unsafe {
+                metrics.http_requests += 1;
+                metrics.ingress += response.len() as u64;
+            }
+            chunk = response.to_vec().clone();
             reply.data(&chunk);
-            println!(
-                " Len: {}, Chunk {} - {}",
+            debug!(
+                "{} - Len: {}, Chunk {} - {}",
+                ino,
                 chunk.len(),
                 offset,
                 offset + chunk.len() as i64
             );
         } else {
-            println!(
-                "Wrong offset. Len is {} but offset {}",
-                content_length, offset
+            // Wrong filesize detected.
+            warn!(
+                "{} - Wrong offset. Len is {} but offset {}",
+                ino, content_length, offset
             );
             reply.data(&[]);
         }
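The HTTP Range header is inclusive: bytes=0-N yields N+1 bytes, hence the two "- 1" adjustments above so the request never runs past either CACHE_HEAD or the end of file. The same math as a standalone sketch:

    // Inclusive Range header for the first chunk of a file: request at
    // most `cache_head` bytes, clamped to the file's actual length.
    fn head_range(cache_head: i64, content_length: i64) -> String {
        let last_byte = if cache_head > content_length {
            content_length - 1
        } else {
            cache_head - 1
        };
        format!("bytes=0-{}", last_byte)
    }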
@@ -302,7 +433,7 @@ impl Filesystem for JsonFilesystem {
         offset: i64,
         mut reply: ReplyDirectory,
     ) {
-        //println!("readdir(ino={}, fh={}, offset={})", ino, fh, offset);
+        debug!("readdir(ino={}, fh={}, offset={})", ino, fh, offset);
         if ino == 1 {
             if offset == 0 {
                 reply.add(1, 0, FileType::Directory, ".");
@@ -322,6 +453,18 @@ impl Filesystem for JsonFilesystem {
 }

 fn main() {
+    Builder::new()
+        .format(|buf, record| {
+            writeln!(
+                buf,
+                "{} [{}] - {}",
+                Local::now().format("%Y-%m-%dT%H:%M:%S"),
+                record.level(),
+                record.args()
+            )
+        })
+        .filter(None, LevelFilter::Info)
+        .init();
     let mountpoint = match env::args().nth(1) {
         Some(path) => path,
         None => {
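With this format every line carries a local timestamp and a level. An illustrative line (values made up):

    2020-04-25T17:03:12 [INFO] - Found 1312 tracks.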
@@ -342,11 +485,64 @@ fn main() {
             return;
         }
     };
-    let lib = get_tracks(&server).unwrap();
+    unsafe {
+        metrics.server_addr = server.clone();
+    }
+    let http_user_var = "HTTP_USER";
+    let http_pass_var = "HTTP_PASS";
+
+    let http_user = match env::var_os(http_user_var) {
+        Some(val) => {
+            info!(
+                "Variable {} is set. Will be used for http auth as user.",
+                http_user_var
+            );
+            val.to_str().unwrap().to_string()
+        }
+        None => {
+            info!("{} is not defined in the environment.", http_user_var);
+            "".to_string()
+        }
+    };
+    let http_pass = match env::var_os(http_pass_var) {
+        Some(val) => {
+            info!(
+                "Variable {} is set. Will be used for http auth as password.",
+                http_pass_var
+            );
+            val.to_str().unwrap().to_string()
+        }
+        None => {
+            info!("{} is not defined in the environment.", http_pass_var);
+            "".to_string()
+        }
+    };
+    unsafe {
+        let mut buf = String::new();
+        buf.push_str(&http_user);
+        buf.push_str(":");
+        buf.push_str(&http_pass);
+        http_auth = base64::encode(buf)
+    }
+    let lib = match get_tracks(&server) {
+        Ok(library) => library,
+        Err(err) => {
+            panic!("Can't fetch library from remote server: {}", err);
+        }
+    };
+    info!("Remote library host: {}", &server);
     let fs = JsonFilesystem::new(&lib, server);
-    let options = ["-o", "ro", "-o", "fsname=musfs", "-o", "async_read"]
+    let options = ["-o", "ro", "-o", "fsname=musfs", "-o", "sync_read"]
         .iter()
         .map(|o| o.as_ref())
         .collect::<Vec<&OsStr>>();
+
+    info!(
+        "Caching {}B bytes in head of files.",
+        SizeFormatterBinary::new(CACHE_HEAD as u64)
+    );
+    info!("Max cache is {} files.", MAX_CACHE_SIZE);
+    info!("Mount options: {:?}", options);
+
     fuse::mount(fs, &mountpoint, &options).expect("Couldn't mount filesystem");
 }
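Credential handling in main() boils down to base64("user:pass"), the RFC 7617 Basic scheme; a compact equivalent of the push_str sequence above:

    // Same token the commit builds by hand from HTTP_USER and HTTP_PASS.
    fn basic_auth_token(user: &str, pass: &str) -> String {
        base64::encode(format!("{}:{}", user, pass))
    }

A typical invocation (argument order assumed from the mountpoint and server matches above, values hypothetical): HTTP_USER=alice HTTP_PASS=secret ./mus-fuse /mnt/music https://music.example.com. The mount options also switch from async_read to sync_read, presumably so the kernel issues reads one at a time against this blocking, cache-mutating read() implementation.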