<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0"
  xmlns:atom="http://www.w3.org/2005/Atom"
  xmlns:media="http://search.yahoo.com/mrss/"
  xmlns:dc="http://purl.org/dc/elements/1.1/">
  <channel>
    <title>AI Tool Discovery — How-To Guides</title>
    <link>https://www.aitooldiscovery.com/how-to</link>
    <atom:link href="https://www.aitooldiscovery.com/feed/how-to.xml" rel="self" type="application/rss+xml"/>
    <description>Step-by-step guides to run AI tools locally, build AI agents, automate workflows with n8n, and set up open-source LLMs. Practical setup guides with real commands.</description>
    <language>en-us</language>
    <managingEditor>hello@aitooldiscovery.com (AI Tool Discovery)</managingEditor>
    <webMaster>hello@aitooldiscovery.com (AI Tool Discovery)</webMaster>
    <lastBuildDate>Fri, 03 Apr 2026 19:46:23 GMT</lastBuildDate>
    <ttl>1440</ttl>
    <image>
      <url>https://www.aitooldiscovery.com/logos/ai-tool-discovery.png</url>
      <title>AI Tool Discovery</title>
      <link>https://www.aitooldiscovery.com</link>
    </image>
    <item>
      <title><![CDATA[How to Set Up a Self-Hosted Perplexity Alternative with Perplexica]]></title>
      <link>https://www.aitooldiscovery.com/how-to/self-hosted-perplexity-alternative</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/self-hosted-perplexity-alternative</guid>
      <pubDate>Sat, 28 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Perplexica is an open-source AI search engine that works like Perplexity.ai: type a question, get a cited answer. The difference is that every part of the stack runs on your own machine. SearXNG handl...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/self-hosted-perplexity-alternative/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/self-hosted-perplexity-alternative/hero.png" medium="image" width="1536" height="1024"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/self-hosted-perplexity-alternative/hero.png" width="1536" height="1024"/>
    </item>
    <item>
      <title><![CDATA[Best Open-Source AI Coding Assistants in 2026: Compared and Ranked]]></title>
      <link>https://www.aitooldiscovery.com/how-to/best-open-source-ai-coding-assistants</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/best-open-source-ai-coding-assistants</guid>
      <pubDate>Sat, 28 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[GitHub Copilot costs $10 to $19 per month per developer. Continue.dev, Aider, Tabby, Cody, and OpenCode are open-source alternatives that match or exceed Copilot for specific workflows at zero tool co...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/best-open-source-ai-coding-assistants/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/best-open-source-ai-coding-assistants/hero.png" medium="image" width="1328" height="706"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/best-open-source-ai-coding-assistants/hero.png" width="1328" height="706"/>
    </item>
    <item>
      <title><![CDATA[Flowise vs Langflow: Which AI Builder Should You Use in 2026?]]></title>
      <link>https://www.aitooldiscovery.com/how-to/flowise-vs-langflow</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/flowise-vs-langflow</guid>
      <pubDate>Tue, 24 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Flowise and Langflow are both open-source, visual drag-and-drop builders for LLM applications. Both let you assemble AI pipelines without writing orchestration code from scratch, and both run in Docke...]]></description>
      <category><![CDATA[Comparisons]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/flowise-vs-langflow/hero.jpg" type="image/jpeg" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/flowise-vs-langflow/hero.jpg" medium="image" width="1438" height="752"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/flowise-vs-langflow/hero.jpg" width="1438" height="752"/>
    </item>
    <item>
      <title><![CDATA[n8n AI Workflow Examples: 5 Practical Automations to Build in 2026]]></title>
      <link>https://www.aitooldiscovery.com/how-to/n8n-ai-workflow-examples</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/n8n-ai-workflow-examples</guid>
      <pubDate>Tue, 24 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[n8n's AI Agent node, released in stable form in late 2024, turns n8n from a pure automation tool into a platform where LLMs can call tools, access memory, and execute multi-step reasoning inside your...]]></description>
      <category><![CDATA[Automation]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/n8n-ai-workflow-examples/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/n8n-ai-workflow-examples/hero.png" medium="image" width="1335" height="749"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/n8n-ai-workflow-examples/hero.png" width="1335" height="749"/>
    </item>
    <item>
      <title><![CDATA[How to Set Up Claude MCP Servers: Complete Configuration Guide (2026)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/claude-mcp-servers-setup</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/claude-mcp-servers-setup</guid>
      <pubDate>Tue, 24 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Model Context Protocol (MCP) is an open standard introduced by Anthropic in November 2024 that lets Claude connect to external data sources and tools. Instead of copy-pasting content into Claude's con...]]></description>
      <category><![CDATA[AI Agents]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/claude-mcp-servers-setup/hero.jpg" type="image/jpeg" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/claude-mcp-servers-setup/hero.jpg" medium="image" width="1400" height="788"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/claude-mcp-servers-setup/hero.jpg" width="1400" height="788"/>
    </item>
    <item>
      <title><![CDATA[How to Run OpenClaw with Ollama Local Models (2026 Guide)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/openclaw-ollama-local-models</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/openclaw-ollama-local-models</guid>
      <pubDate>Thu, 19 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[OpenClaw is an autonomous AI agent framework that gained 113,000+ GitHub stars in five days during January 2026. It runs continuously on your machine, connects to messaging apps like WhatsApp and Tele...]]></description>
      <category><![CDATA[AI Agents]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/openclaw-ollama-local-models/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/openclaw-ollama-local-models/hero.png" medium="image" width="1536" height="1024"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/openclaw-ollama-local-models/hero.png" width="1536" height="1024"/>
    </item>
    <item>
      <title><![CDATA[How to Run DeepSeek R1 Locally with Ollama (2026 Guide)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/run-deepseek-r1-locally</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/run-deepseek-r1-locally</guid>
      <pubDate>Thu, 19 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[DeepSeek R1 is a reasoning model released by Chinese AI lab DeepSeek in January 2026. It uses chain-of-thought reasoning, producing a visible thinking process before its final answer. On math, coding,...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/run-deepseek-r1-locally/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/run-deepseek-r1-locally/hero.png" medium="image" width="1536" height="1024"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/run-deepseek-r1-locally/hero.png" width="1536" height="1024"/>
    </item>
    <item>
      <title><![CDATA[How to Build an AI Agent with n8n (2026 Guide)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/build-ai-agent-n8n</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/build-ai-agent-n8n</guid>
      <pubDate>Thu, 19 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[n8n's AI Agent node, introduced in n8n 1.19.0 (August 2024), lets you build autonomous agents that reason over input, select tools, call external APIs, and loop until a task is complete. Unlike a simp...]]></description>
      <category><![CDATA[AI Agents]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/build-ai-agent-n8n/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/build-ai-agent-n8n/hero.png" medium="image" width="1536" height="1024"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/build-ai-agent-n8n/hero.png" width="1536" height="1024"/>
    </item>
    <item>
      <title><![CDATA[Best Local LLM Models to Run in 2026 (Benchmarks + Use Cases)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/best-local-llm-models</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/best-local-llm-models</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Running an LLM locally means zero API costs, complete data privacy, and no rate limits. The practical question is which model to actually run. In 2026 there are more than 100 quantised models availabl...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/best-local-llm-models/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/best-local-llm-models/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/best-local-llm-models/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Deploy n8n on a VPS: Production Setup with Nginx and SSL (2026)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/how-to-deploy-n8n-on-vps</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/how-to-deploy-n8n-on-vps</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Running n8n on your own VPS gives you a persistent automation server accessible from anywhere, with no monthly subscription fees beyond the server cost. Unlike the n8n cloud plan ($24/month), a self-h...]]></description>
      <category><![CDATA[Docker & VPS]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/how-to-deploy-n8n-on-vps/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/how-to-deploy-n8n-on-vps/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/how-to-deploy-n8n-on-vps/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[Ollama vs LM Studio: Which Local LLM Tool Should You Use in 2026?]]></title>
      <link>https://www.aitooldiscovery.com/how-to/ollama-vs-lm-studio</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/ollama-vs-lm-studio</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Ollama and LM Studio are the two most widely used tools for running large language models locally in 2026. They download the same underlying models and produce the same output quality — the difference...]]></description>
      <category><![CDATA[Comparisons]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/ollama-vs-lm-studio/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/ollama-vs-lm-studio/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/ollama-vs-lm-studio/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Install Flowise with Docker: AI Agent Builder Setup Guide]]></title>
      <link>https://www.aitooldiscovery.com/how-to/how-to-install-flowise-docker</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/how-to-install-flowise-docker</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Flowise is a visual drag-and-drop builder for LLM applications, AI agents, and chatbots. Instead of writing orchestration code from scratch, you connect nodes on a canvas to build chains, retrieval-au...]]></description>
      <category><![CDATA[AI Agents]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/how-to-install-flowise-docker/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/how-to-install-flowise-docker/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/how-to-install-flowise-docker/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Use Ollama with Python: API Integration Tutorial (2026)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/how-to-use-ollama-with-python</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/how-to-use-ollama-with-python</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Ollama exposes a local REST API on port 11434 that any language can call. Python has two clean approaches: the official ollama library (a thin wrapper around the HTTP API) and direct requests or httpx...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/how-to-use-ollama-with-python/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/how-to-use-ollama-with-python/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/how-to-use-ollama-with-python/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Run Stable Diffusion Locally: ComfyUI and AUTOMATIC1111 Guide]]></title>
      <link>https://www.aitooldiscovery.com/how-to/how-to-run-stable-diffusion-locally</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/how-to-run-stable-diffusion-locally</guid>
      <pubDate>Tue, 17 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Stable Diffusion is an open-source image generation model you can run on your own GPU without sending prompts or images to an external server. Unlike Midjourney or DALL-E 3, local Stable Diffusion has...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/how-to-run-stable-diffusion-locally/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/how-to-run-stable-diffusion-locally/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/how-to-run-stable-diffusion-locally/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Run Ollama Locally: Complete Setup Guide (2026)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/run-ollama-locally</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/run-ollama-locally</guid>
      <pubDate>Mon, 16 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Ollama lets you run large language models on your own hardware without sending data to external servers. The project reached 95,000+ GitHub stars in early 2026, making it the most widely adopted local...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/run-ollama-locally/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/run-ollama-locally/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/run-ollama-locally/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Install n8n with Docker Compose (Self-Hosted Setup)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/install-n8n-docker</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/install-n8n-docker</guid>
      <pubDate>Mon, 16 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[n8n is an open-source workflow automation platform with 400+ integrations. It lets you build automations between any API, database, or service using a visual node editor, without writing code for most...]]></description>
      <category><![CDATA[Automation]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/install-n8n-docker/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/install-n8n-docker/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/install-n8n-docker/hero.png" width="1200" height="630"/>
    </item>
    <item>
      <title><![CDATA[How to Set Up Open-WebUI with Ollama (Docker Guide)]]></title>
      <link>https://www.aitooldiscovery.com/how-to/setup-open-webui-ollama</link>
      <guid isPermaLink="true">https://www.aitooldiscovery.com/how-to/setup-open-webui-ollama</guid>
      <pubDate>Mon, 16 Mar 2026 00:00:00 GMT</pubDate>
      <description><![CDATA[Open-WebUI is a browser-based interface for Ollama that provides a ChatGPT-style chat experience for your local models. It adds features that the Ollama CLI lacks: a conversation history sidebar, mode...]]></description>
      <category><![CDATA[Local AI]]></category>
      <enclosure url="https://www.aitooldiscovery.com/images/how-to/setup-open-webui-ollama/hero.png" type="image/png" length="0"/>
      <media:content url="https://www.aitooldiscovery.com/images/how-to/setup-open-webui-ollama/hero.png" medium="image" width="1200" height="630"/>
      <media:thumbnail url="https://www.aitooldiscovery.com/images/how-to/setup-open-webui-ollama/hero.png" width="1200" height="630"/>
    </item>
  </channel>
</rss>