Mirror of https://github.com/leptos-rs/leptos.git, synced 2025-12-28 06:42:35 -05:00
Compare commits
15 Commits
f1bc734dcf
f71b4aae69
a834c03974
595013579c
8b1bd1ae9e
6ef1531059
9f1406250e
1f6a892291
0ff1e279a2
c6096cc2a0
8a2ae7fc7c
9de34b74cf
1b5961edaa
26d1aee9ad
2bf09384df
Cargo.toml
@@ -25,22 +25,22 @@ members = [
 exclude = ["benchmarks", "examples"]
 
 [workspace.package]
-version = "0.6.0"
+version = "0.6.5"
 
 [workspace.dependencies]
-leptos = { path = "./leptos", version = "0.6.0" }
-leptos_dom = { path = "./leptos_dom", version = "0.6.0" }
-leptos_hot_reload = { path = "./leptos_hot_reload", version = "0.6.0" }
-leptos_macro = { path = "./leptos_macro", version = "0.6.0" }
-leptos_reactive = { path = "./leptos_reactive", version = "0.6.0" }
-leptos_server = { path = "./leptos_server", version = "0.6.0" }
-server_fn = { path = "./server_fn", version = "0.6.0" }
-server_fn_macro = { path = "./server_fn_macro", version = "0.6.0" }
+leptos = { path = "./leptos", version = "0.6.5" }
+leptos_dom = { path = "./leptos_dom", version = "0.6.5" }
+leptos_hot_reload = { path = "./leptos_hot_reload", version = "0.6.5" }
+leptos_macro = { path = "./leptos_macro", version = "0.6.5" }
+leptos_reactive = { path = "./leptos_reactive", version = "0.6.5" }
+leptos_server = { path = "./leptos_server", version = "0.6.5" }
+server_fn = { path = "./server_fn", version = "0.6.5" }
+server_fn_macro = { path = "./server_fn_macro", version = "0.6.5" }
 server_fn_macro_default = { path = "./server_fn/server_fn_macro_default", version = "0.6" }
-leptos_config = { path = "./leptos_config", version = "0.6.0" }
-leptos_router = { path = "./router", version = "0.6.0" }
-leptos_meta = { path = "./meta", version = "0.6.0" }
-leptos_integration_utils = { path = "./integrations/utils", version = "0.6.0" }
+leptos_config = { path = "./leptos_config", version = "0.6.5" }
+leptos_router = { path = "./router", version = "0.6.5" }
+leptos_meta = { path = "./meta", version = "0.6.5" }
+leptos_integration_utils = { path = "./integrations/utils", version = "0.6.5" }
 
 [profile.release]
 codegen-units = 1
README.md
@@ -40,6 +40,25 @@ pub fn SimpleCounter(initial_value: i32) -> impl IntoView {
     }
 }
 
+// we also support a builder syntax rather than the JSX-like `view` macro
+#[component]
+pub fn SimpleCounterWithBuilder(initial_value: i32) -> impl IntoView {
+    use leptos::html::*;
+
+    let (value, set_value) = create_signal(initial_value);
+    let clear = move |_| set_value(0);
+    let decrement = move |_| set_value.update(|value| *value -= 1);
+    let increment = move |_| set_value.update(|value| *value += 1);
+
+    // the `view` macro above expands to this builder syntax
+    div().child((
+        button().on(ev::click, clear).child("Clear"),
+        button().on(ev::click, decrement).child("-1"),
+        span().child(("Value: ", value, "!")),
+        button().on(ev::click, increment).child("+1")
+    ))
+}
+
 // Easy to use with Trunk (trunkrs.dev) or with a simple wasm-bindgen setup
 pub fn main() {
     mount_to_body(|| view! {
@@ -141,3 +160,26 @@ Sure! Obviously the `view` macro is for generating DOM nodes but you can use the
 I've put together a [very simple GTK example](https://github.com/leptos-rs/leptos/blob/main/examples/gtk/src/main.rs) so you can see what I mean.
 
+The new rendering approach being developed for 0.7 supports “universal rendering,” i.e., it can use any rendering library that supports a small set of 6-8 functions. (This is intended as a layer over typical retained-mode, OOP-style GUI toolkits like the DOM, GTK, etc.) That future rendering work will allow creating native UI in a way that is much more similar to the declarative approach used by the web framework.
+
+### How is this different from Yew?
+
+Yew is the most-used library for Rust web UI development, but there are several differences between Yew and Leptos in philosophy, approach, and performance.
+
+- **VDOM vs. fine-grained:** Yew is built on the virtual DOM (VDOM) model: state changes cause components to re-render, generating a new virtual DOM tree. Yew diffs this against the previous VDOM and applies those patches to the actual DOM. Component functions rerun whenever state changes. Leptos takes an entirely different approach: components run once, creating (and returning) actual DOM nodes and setting up a reactive system to update those DOM nodes.
+- **Performance:** This has huge performance implications: Leptos is simply much faster at both creating and updating the UI than Yew is.
+- **Server integration:** Yew was created in an era in which browser-rendered single-page apps (SPAs) were the dominant paradigm. While Leptos supports client-side rendering, it also focuses on integrating with the server side of your application via server functions and multiple modes of serving HTML, including out-of-order streaming.
+
+### How is this different from Dioxus?
+
+Like Leptos, Dioxus is a framework for building UIs using web technologies. However, there are significant differences in approach and features.
+
+- **VDOM vs. fine-grained:** While Dioxus has a performant virtual DOM (VDOM), it still uses coarse-grained, component-scoped reactivity: changing a stateful value reruns the component function and diffs the old UI against the new one. Leptos components use a different mental model, creating (and returning) actual DOM nodes and setting up a reactive system to update those DOM nodes.
+- **Web vs. desktop priorities:** Dioxus uses Leptos server functions in its fullstack mode, but it does not have the same `<Suspense>`-based support for things like streaming HTML rendering, nor the same focus on holistic web performance. Leptos tends to prioritize holistic web performance (streaming HTML rendering, smaller WASM binary sizes, etc.), whereas Dioxus offers an unparalleled experience when building desktop apps, because your application logic runs as a native Rust binary.
+
+### How is this different from Sycamore?
+
+Sycamore and Leptos are both heavily influenced by SolidJS. At this point, Leptos has a larger community and ecosystem and is more actively developed. Other differences:
+
+- **Templating DSLs:** Sycamore uses a custom templating language for its views, while Leptos uses a JSX-like template format.
+- **`'static` signals:** One of Leptos’s main innovations was the creation of `Copy + 'static` signals, which have excellent ergonomics. Sycamore is in the process of adopting the same pattern, but this is not yet released.
+- **Perseus vs. server functions:** The Perseus metaframework provides an opinionated way to build Sycamore apps that include server functionality. Leptos instead provides primitives like server functions in the core of the framework.
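To make the fine-grained model described above concrete, here is a minimal sketch (not part of the README diff) using the same 0.6 APIs as the counter examples earlier on this page: the component function runs once to create the DOM nodes, and only the closure that reads the signal reruns when the value changes.

```rust
use leptos::*;

#[component]
pub fn FineGrainedCounter() -> impl IntoView {
    // This body runs exactly once, when the component is created.
    let (count, set_count) = create_signal(0);

    // The <button> below is created once and never diffed or re-rendered;
    // only the `move || count()` closure reruns when `count` changes.
    view! {
        <button on:click=move |_| set_count.update(|n| *n += 1)>
            "Count: " {move || count()}
        </button>
    }
}
```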
@@ -31,6 +31,9 @@ web-sys = { version = "0.3.67", features = ["FileList", "File"] }
|
||||
strum = { version = "0.25.0", features = ["strum_macros", "derive"] }
|
||||
notify = { version = "6.1.1", optional = true }
|
||||
pin-project-lite = "0.2.13"
|
||||
dashmap = { version = "5.5.3", optional = true }
|
||||
once_cell = { version = "1.19.0", optional = true }
|
||||
async-broadcast = { version = "0.6.0", optional = true }
|
||||
|
||||
[features]
|
||||
hydrate = ["leptos/hydrate", "leptos_meta/hydrate", "leptos_router/hydrate"]
|
||||
@@ -43,7 +46,10 @@ ssr = [
|
||||
"leptos_meta/ssr",
|
||||
"leptos_router/ssr",
|
||||
"dep:leptos_axum",
|
||||
"dep:notify"
|
||||
"dep:notify",
|
||||
"dep:dashmap",
|
||||
"dep:once_cell",
|
||||
"dep:async-broadcast"
|
||||
]
|
||||
|
||||
[package.metadata.cargo-all-features]
|
||||
|
||||
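The three dependencies added above (`dashmap`, `once_cell`, `async-broadcast`) are optional and only enabled through the `ssr` feature, so they are compiled into the server binary but never into the WASM bundle. A minimal sketch of how server-only code is then gated behind that feature (it mirrors, in simplified form, the `progress` module added to the example later in this change set):

```rust
// Compiled only when the `ssr` feature (and therefore dashmap/once_cell) is enabled.
#[cfg(feature = "ssr")]
mod progress {
    use dashmap::DashMap;
    use once_cell::sync::Lazy;

    // One running total per uploaded file, keyed by filename.
    static FILES: Lazy<DashMap<String, usize>> = Lazy::new(DashMap::new);

    pub fn add_chunk(filename: &str, len: usize) {
        *FILES.entry(filename.to_string()).or_insert(0) += len;
    }
}
```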
@@ -5,13 +5,15 @@ use leptos_meta::{provide_meta_context, Link, Meta, Stylesheet};
 use leptos_router::{ActionForm, Route, Router, Routes};
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use server_fn::{
+    client::{browser::BrowserClient, Client},
     codec::{
         Encoding, FromReq, FromRes, GetUrl, IntoReq, IntoRes, MultipartData,
         MultipartFormData, Rkyv, SerdeLite, StreamingText, TextStream,
     },
-    request::{ClientReq, Req},
-    response::{ClientRes, Res},
+    request::{browser::BrowserRequest, ClientReq, Req},
+    response::{browser::BrowserResponse, ClientRes, Res},
 };
+use std::future::Future;
 #[cfg(feature = "ssr")]
 use std::sync::{
     atomic::{AtomicU8, Ordering},
@@ -55,8 +57,10 @@ pub fn HomePage() -> impl IntoView {
         <ServerFnArgumentExample/>
         <RkyvExample/>
         <FileUpload/>
+        <FileUploadWithProgress/>
         <FileWatcher/>
         <CustomEncoding/>
+        <CustomClientExample/>
     }
 }
@@ -331,8 +335,8 @@ pub fn FileUpload() -> impl IntoView {
 ///
 /// On the server, this uses the `multer` crate, which provides a streaming API.
 #[server(
-    input = MultipartFormData,
-)]
+    input = MultipartFormData,
+)]
 pub async fn file_length(
     data: MultipartData,
 ) -> Result<usize, ServerFnError> {
@@ -390,6 +394,168 @@ pub fn FileUpload() -> impl IntoView {
     }
 }
 
+/// This component uses server functions to upload a file, while streaming updates on the upload
+/// progress.
+#[component]
+pub fn FileUploadWithProgress() -> impl IntoView {
+    /// In theory, you could create a single server function which
+    /// 1) received multipart form data
+    /// 2) returned a stream that contained updates on the progress
+    ///
+    /// In reality, browsers do not actually support duplexing requests in this way. In other
+    /// words, every existing browser actually requires that the request stream be complete before
+    /// it begins processing the response stream.
+    ///
+    /// Instead, we can create two separate server functions:
+    /// 1) one that receives multipart form data and begins processing the upload
+    /// 2) a second that returns a stream of updates on the progress
+    ///
+    /// This requires us to store some global state of all the uploads. In a real app, you probably
+    /// shouldn't do exactly what I'm doing here in the demo. For example, this map just
+    /// distinguishes between files by filename, not by user.
+    #[cfg(feature = "ssr")]
+    mod progress {
+        use async_broadcast::{broadcast, Receiver, Sender};
+        use dashmap::DashMap;
+        use futures::Stream;
+        use once_cell::sync::Lazy;
+
+        struct File {
+            total: usize,
+            tx: Sender<usize>,
+            rx: Receiver<usize>,
+        }
+
+        static FILES: Lazy<DashMap<String, File>> = Lazy::new(DashMap::new);
+
+        pub async fn add_chunk(filename: &str, len: usize) {
+            println!("[{filename}]\tadding {len}");
+            let mut entry =
+                FILES.entry(filename.to_string()).or_insert_with(|| {
+                    println!("[{filename}]\tinserting channel");
+                    let (tx, rx) = broadcast(128);
+                    File { total: 0, tx, rx }
+                });
+            entry.total += len;
+            let new_total = entry.total;
+
+            // we're about to do an async broadcast, so we don't want to hold a lock across it
+            let tx = entry.tx.clone();
+            drop(entry);
+
+            // now we send the message and don't have to worry about it
+            tx.broadcast(new_total)
+                .await
+                .expect("couldn't send a message over channel");
+        }
+
+        pub fn for_file(filename: &str) -> impl Stream<Item = usize> {
+            let entry =
+                FILES.entry(filename.to_string()).or_insert_with(|| {
+                    println!("[{filename}]\tinserting channel");
+                    let (tx, rx) = broadcast(128);
+                    File { total: 0, tx, rx }
+                });
+            entry.rx.clone()
+        }
+    }
+
+    #[server(
+        input = MultipartFormData,
+    )]
+    pub async fn upload_file(data: MultipartData) -> Result<(), ServerFnError> {
+        let mut data = data.into_inner().unwrap();
+
+        while let Ok(Some(mut field)) = data.next_field().await {
+            let name =
+                field.file_name().expect("no filename on field").to_string();
+            while let Ok(Some(chunk)) = field.chunk().await {
+                let len = chunk.len();
+                println!("[{name}]\t{len}");
+                progress::add_chunk(&name, len).await;
+                // in a real server function, you'd do something like saving the file here
+            }
+        }
+
+        Ok(())
+    }
+
+    #[server(output = StreamingText)]
+    pub async fn file_progress(
+        filename: String,
+    ) -> Result<TextStream, ServerFnError> {
+        println!("getting progress on {filename}");
+        // get the stream of current length for the file
+        let progress = progress::for_file(&filename);
+        // separate each number with a newline
+        // the HTTP response might pack multiple lines of this into a single chunk
+        // we need some way of dividing them up
+        let progress = progress.map(|bytes| Ok(format!("{bytes}\n")));
+        Ok(TextStream::new(progress))
+    }
+
+    let (filename, set_filename) = create_signal(None);
+    let (max, set_max) = create_signal(None);
+    let (current, set_current) = create_signal(None);
+    let on_submit = move |ev: SubmitEvent| {
+        ev.prevent_default();
+        let target = ev.target().unwrap().unchecked_into::<HtmlFormElement>();
+        let form_data = FormData::new_with_form(&target).unwrap();
+        let file = form_data
+            .get("file_to_upload")
+            .unchecked_into::<web_sys::File>();
+        let filename = file.name();
+        let size = file.size() as usize;
+        set_filename(Some(filename.clone()));
+        set_max(Some(size));
+        set_current(None::<usize>);
+
+        spawn_local(async move {
+            let mut progress = file_progress(filename)
+                .await
+                .expect("couldn't initialize stream")
+                .into_inner();
+            while let Some(Ok(len)) = progress.next().await {
+                // the TextStream from the server function will be a series of `usize` values
+                // however, the response itself may pack those chunks into a smaller number of
+                // chunks, each with more text in it
+                // so we've padded them with newlines, and will split them out here
+                // each value is the latest total, so we'll just take the last one
+                let len = len
+                    .split('\n')
+                    .filter(|n| !n.is_empty())
+                    .last()
+                    .expect(
+                        "expected at least one non-empty value from \
+                         newline-delimited rows",
+                    )
+                    .parse::<usize>()
+                    .expect("invalid length");
+                set_current(Some(len));
+            }
+        });
+        spawn_local(async move {
+            upload_file(form_data.into())
+                .await
+                .expect("couldn't upload file");
+        });
+    };
+
+    view! {
+        <h3>File Upload with Progress</h3>
+        <p>A file upload with progress can be handled with two separate server functions.</p>
+        <aside>See the doc comment on the component for an explanation.</aside>
+        <form on:submit=on_submit>
+            <input type="file" name="file_to_upload"/>
+            <input type="submit"/>
+        </form>
+        {move || filename().map(|filename| view! { <p>Uploading {filename}</p> })}
+        {move || max().map(|max| view! {
+            <progress max=max value=move || current().unwrap_or_default()/>
+        })}
+    }
+}
+
 #[component]
 pub fn FileWatcher() -> impl IntoView {
     #[server(input = GetUrl, output = StreamingText)]
@@ -632,3 +798,55 @@ pub fn CustomEncoding() -> impl IntoView {
         <p>{result}</p>
     }
 }
+
+/// Middleware lets you modify the request/response on the server.
+///
+/// On the client, you might also want to modify the request. For example, you may need to add a
+/// custom header for authentication on every request. You can do this by creating a "custom
+/// client."
+#[component]
+pub fn CustomClientExample() -> impl IntoView {
+    // Define a type for our client.
+    pub struct CustomClient;
+
+    // Implement the `Client` trait for it.
+    impl<CustErr> Client<CustErr> for CustomClient {
+        // BrowserRequest and BrowserResponse are the defaults used by other server functions.
+        // They are wrappers for the underlying Web Fetch API types.
+        type Request = BrowserRequest;
+        type Response = BrowserResponse;
+
+        // Our custom `send()` implementation does all the work.
+        fn send(
+            req: Self::Request,
+        ) -> impl Future<Output = Result<Self::Response, ServerFnError<CustErr>>>
+               + Send {
+            // BrowserRequest derefs to the underlying Request type from gloo-net,
+            // so we can get access to the headers here
+            let headers = req.headers();
+            // modify the headers by appending one
+            headers.append("X-Custom-Header", "foobar");
+            // delegate back out to BrowserClient to send the modified request
+            BrowserClient::send(req)
+        }
+    }
+
+    // Specify our custom client with `client = `
+    #[server(client = CustomClient)]
+    pub async fn fn_with_custom_client() -> Result<(), ServerFnError> {
+        use http::header::HeaderMap;
+        use leptos_axum::extract;
+
+        let headers: HeaderMap = extract().await?;
+        let custom_header = headers.get("X-Custom-Header");
+        println!("X-Custom-Header = {custom_header:?}");
+        Ok(())
+    }
+
+    view! {
+        <h3>Custom clients</h3>
+        <p>You can define a custom server function client to do something like adding a header to every request.</p>
+        <p>Check the network request in your browser devtools to see how this client adds a custom header.</p>
+        <button on:click=|_| spawn_local(async { fn_with_custom_client().await.unwrap() })>Click me</button>
+    }
+}
@@ -150,14 +150,6 @@ pub fn redirect(path: &str) {
              to redirect()."
         );
     }
-    if let Some(response_options) = use_context::<ResponseOptions>() {
-        response_options.set_status(StatusCode::FOUND);
-        response_options.insert_header(
-            header::LOCATION,
-            header::HeaderValue::from_str(path)
-                .expect("Failed to create HeaderValue"),
-        );
-    }
 }
 
 /// An Actix [struct@Route](actix_web::Route) that listens for a `POST` request with
@@ -1390,15 +1382,13 @@ impl LeptosRoutes for &mut ServiceConfig {
 /// Ok(data)
 /// }
 /// ```
-pub async fn extract<T, CustErr>() -> Result<T, ServerFnError<CustErr>>
+pub async fn extract<T>() -> Result<T, ServerFnError>
 where
     T: actix_web::FromRequest,
     <T as FromRequest>::Error: Display,
 {
     let req = use_context::<HttpRequest>().ok_or_else(|| {
-        ServerFnError::ServerError(
-            "HttpRequest should have been provided via context".to_string(),
-        )
+        ServerFnError::new("HttpRequest should have been provided via context")
     })?;
 
     T::extract(&req)
@@ -55,10 +55,7 @@ use leptos_router::*;
 use once_cell::sync::OnceCell;
 use parking_lot::RwLock;
 use server_fn::redirect::REDIRECT_HEADER;
-use std::{
-    error::Error, fmt::Debug, io, pin::Pin, sync::Arc,
-    thread::available_parallelism,
-};
+use std::{fmt::Debug, io, pin::Pin, sync::Arc, thread::available_parallelism};
 use tokio_util::task::LocalPoolHandle;
 use tracing::Instrument;
@@ -1772,13 +1769,12 @@ fn get_leptos_pool() -> LocalPoolHandle {
 /// Ok(query)
 /// }
 /// ```
-pub async fn extract<T, CustErr>() -> Result<T, ServerFnError>
+pub async fn extract<T>() -> Result<T, ServerFnError>
 where
     T: Sized + FromRequestParts<()>,
     T::Rejection: Debug,
-    CustErr: Error + 'static,
 {
-    extract_with_state::<T, (), CustErr>(&()).await
+    extract_with_state::<T, ()>(&()).await
 }
 
 /// A helper to make it easier to use Axum extractors in server functions. This
@@ -1800,18 +1796,14 @@ where
 /// Ok(query)
 /// }
 /// ```
-pub async fn extract_with_state<T, S, CustErr>(
-    state: &S,
-) -> Result<T, ServerFnError>
+pub async fn extract_with_state<T, S>(state: &S) -> Result<T, ServerFnError>
 where
     T: Sized + FromRequestParts<S>,
     T::Rejection: Debug,
-    CustErr: Error + 'static,
 {
     let mut parts = use_context::<Parts>().ok_or_else(|| {
-        ServerFnError::ServerError::<CustErr>(
-            "should have had Parts provided by the leptos_axum integration"
-                .to_string(),
-        )
+        ServerFnError::new(
+            "should have had Parts provided by the leptos_axum integration",
+        )
     })?;
     T::from_request_parts(&mut parts, state)
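A short usage sketch of the simplified signatures above: with the custom-error type parameter removed, `extract` can be called in a server function without naming an error type. The server function below is hypothetical; `axum::extract::Query` is used because it implements `FromRequestParts`.

```rust
#[server]
pub async fn raw_query() -> Result<String, ServerFnError> {
    use axum::extract::Query;
    use leptos_axum::extract;
    use std::collections::HashMap;

    // With the new `extract<T>()` signature, only the extractor type is needed.
    let Query(params): Query<HashMap<String, String>> = extract().await?;
    Ok(format!("{params:?}"))
}
```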
@@ -385,7 +385,10 @@ where
     // client
     create_render_effect({
         let r = Rc::clone(&r);
-        move |_| r.load(false, id)
+        move |_| {
+            source.track();
+            r.load(false, id)
+        }
     });
 
     Resource {
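The `source.track()` call added above subscribes the client-side render effect to the resource's source signal, so that the resource reloads when that signal changes. A minimal sketch of the pattern this affects, with a hypothetical `fetch_user` fetcher:

```rust
use leptos::*;

// hypothetical fetcher; a real app would call an API here
async fn fetch_user(id: u32) -> String {
    format!("user #{id}")
}

#[component]
pub fn UserView() -> impl IntoView {
    let (id, set_id) = create_signal(1u32);
    // `move || id.get()` is the resource's source; because the render effect
    // tracks it, bumping `id` runs `fetch_user` again with the new value.
    let user = create_resource(move || id.get(), fetch_user);

    view! {
        <button on:click=move |_| set_id.update(|n| *n += 1)>"Next user"</button>
        <p>{move || user.get()}</p>
    }
}
```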
@@ -281,7 +281,7 @@ impl Runtime {
         let source_map = self.node_sources.borrow();
         for effect in subs.borrow().iter() {
             if let Some(effect_sources) = source_map.get(*effect) {
-                effect_sources.borrow_mut().remove(&node);
+                effect_sources.borrow_mut().swap_remove(&node);
             }
         }
     }
@@ -308,7 +308,7 @@ impl Runtime {
         let subs = self.node_subscribers.borrow();
         for source in sources.borrow().iter() {
             if let Some(source) = subs.get(*source) {
-                source.borrow_mut().remove(&node_id);
+                source.borrow_mut().swap_remove(&node_id);
             }
         }
     }
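For context on the `remove` to `swap_remove` changes above (and the matching one in `leptos_meta` further down): assuming these subscriber/source collections are index sets (indexmap's `IndexSet`), `swap_remove` deletes in O(1) by moving the last element into the vacated slot, at the cost of insertion order. A minimal sketch of that behavior:

```rust
use indexmap::IndexSet;

fn main() {
    let mut subscribers: IndexSet<u32> = [1, 2, 3, 4].into_iter().collect();
    // O(1): the last element (4) is swapped into the slot that held 2,
    // so insertion order is no longer preserved.
    subscribers.swap_remove(&2);
    assert_eq!(
        subscribers.iter().copied().collect::<Vec<_>>(),
        vec![1, 4, 3]
    );
}
```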
@@ -1,6 +1,6 @@
 [package]
 name = "leptos_meta"
-version = "0.6.0"
+version = "0.6.5"
 edition = "2021"
 authors = ["Greg Johnston"]
 license = "MIT"
@@ -158,7 +158,7 @@ impl MetaTagsContext {
             move || {
                 let head = document().head().unwrap_throw();
                 _ = head.remove_child(&el);
-                els.borrow_mut().remove(&id);
+                els.borrow_mut().swap_remove(&id);
             }
         });
@@ -1,6 +1,6 @@
 [package]
 name = "leptos_router"
-version = "0.6.0"
+version = "0.6.5"
 edition = "2021"
 authors = ["Greg Johnston"]
 license = "MIT"
@@ -1,3 +1,4 @@
 edition = "2021"
 imports_granularity = "Crate"
 max_width = 80
+format_strings = true
@@ -9,7 +9,7 @@ description = "RPC for any web framework."
|
||||
readme = "../README.md"
|
||||
|
||||
[dependencies]
|
||||
server_fn_macro_default = "0.6.0"
|
||||
server_fn_macro_default = { workspace = true }
|
||||
# used for hashing paths in #[server] macro
|
||||
const_format = "0.2"
|
||||
xxhash-rust = { version = "0.8", features = ["const_xxh64"] }
|
||||
@@ -18,7 +18,7 @@ serde = { version = "1", features = ["derive"] }
|
||||
send_wrapper = { version = "0.6", features = ["futures"], optional = true }
|
||||
|
||||
# registration system
|
||||
inventory = {version="0.3",optional=true}
|
||||
inventory = { version = "0.3", optional = true }
|
||||
dashmap = "5"
|
||||
once_cell = "1"
|
||||
|
||||
@@ -72,11 +72,11 @@ reqwest = { version = "0.11", default-features = false, optional = true, feature
|
||||
url = "2"
|
||||
|
||||
[features]
|
||||
default = [ "json", "cbor"]
|
||||
default = ["json", "cbor"]
|
||||
form-redirects = []
|
||||
actix = ["ssr", "dep:actix-web", "dep:send_wrapper"]
|
||||
axum = [
|
||||
"ssr",
|
||||
"ssr",
|
||||
"dep:axum",
|
||||
"dep:hyper",
|
||||
"dep:http-body-util",
|
||||
@@ -109,4 +109,21 @@ all-features = true
 # disables some feature combos for testing in CI
 [package.metadata.cargo-all-features]
 denylist = ["rustls", "default-tls", "form-redirects"]
-skip_feature_sets = [["actix", "axum"], ["browser", "actix"], ["browser", "axum"], ["browser", "reqwest"]]
+skip_feature_sets = [
+    [
+        "actix",
+        "axum",
+    ],
+    [
+        "browser",
+        "actix",
+    ],
+    [
+        "browser",
+        "axum",
+    ],
+    [
+        "browser",
+        "reqwest",
+    ],
+]
@@ -175,7 +175,6 @@ impl<E> ViaError<E> for WrapError<E> {
 /// This means that other error types can easily be converted into it using the
 /// `?` operator.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-#[serde(tag = "type")]
 pub enum ServerFnError<E = NoCustomError> {
     /// A user-defined custom error type, which defaults to [`NoCustomError`].
     WrappedServerError(E),
@@ -5,6 +5,7 @@ use futures::{Stream, StreamExt};
 pub use gloo_net::http::Request;
 use js_sys::{Reflect, Uint8Array};
 use send_wrapper::SendWrapper;
+use std::ops::{Deref, DerefMut};
 use wasm_bindgen::JsValue;
 use wasm_streams::ReadableStream;
 use web_sys::{FormData, Headers, RequestInit, UrlSearchParams};
@@ -19,6 +20,32 @@ impl From<Request> for BrowserRequest {
     }
 }
 
+impl From<BrowserRequest> for Request {
+    fn from(value: BrowserRequest) -> Self {
+        value.0.take()
+    }
+}
+
+impl From<BrowserRequest> for web_sys::Request {
+    fn from(value: BrowserRequest) -> Self {
+        value.0.take().into()
+    }
+}
+
+impl Deref for BrowserRequest {
+    type Target = Request;
+
+    fn deref(&self) -> &Self::Target {
+        self.0.deref()
+    }
+}
+
+impl DerefMut for BrowserRequest {
+    fn deref_mut(&mut self) -> &mut Self::Target {
+        self.0.deref_mut()
+    }
+}
+
 /// The `FormData` type available in the browser.
 #[derive(Debug)]
 pub struct BrowserFormData(pub(crate) SendWrapper<FormData>);
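These new `From` and `Deref` impls appear to exist to support custom clients like the `CustomClientExample` earlier in this change set: a custom `send()` can reach the underlying gloo-net request (for example, its headers) and convert back out to a raw `web_sys::Request` when needed. A minimal sketch with a hypothetical helper, not part of the diff:

```rust
use server_fn::request::browser::BrowserRequest;

// hypothetical helper: append a header, then hand back the raw web_sys type
fn tag_request(req: BrowserRequest) -> web_sys::Request {
    // Deref<Target = gloo_net::http::Request> exposes the headers
    req.headers().append("X-Custom-Header", "foobar");
    // From<BrowserRequest> for web_sys::Request converts back out
    req.into()
}
```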