ATProto Browser

Experimental browser for the Atmosphere

Post

MagicPIG: LSH Sampling for Efficient LLM Generation This repo is for exploring the possibility of GPU-CPU system powered by LSH. Three models are supported now: llama3-8b-chat-128k, llama3-70b-chat-128k, mistral-7b-chat-512k. github.com/Infini-AI-La...

Nov 27, 2024, 8:52 PM

{
  "text": "MagicPIG: LSH Sampling for Efficient LLM Generation\n\nThis repo is for exploring the possibility of GPU-CPU system powered by LSH. Three models are supported now: llama3-8b-chat-128k, llama3-70b-chat-128k, mistral-7b-chat-512k.\n\ngithub.com/Infini-AI-La...",
  "$type": "app.bsky.feed.post",
  "embed": {
    "$type": "app.bsky.embed.external",
    "external": {
      "uri": "https://github.com/Infini-AI-Lab/MagicPIG",
      "thumb": {
        "$type": "blob",
        "ref": {
          "$link": "bafkreigj2a6c6sdhtvlysrymwn2eabq6amsrgsfykiyxt2rwhoskcyznte"
        },
        "mimeType": "image/jpeg",
        "size": 245220
      },
      "title": "GitHub - Infini-AI-Lab/MagicPIG: MagicPIG: LSH Sampling for Efficient LLM Generation",
      "description": "MagicPIG: LSH Sampling for Efficient LLM Generation - Infini-AI-Lab/MagicPIG"
    }
  },
  "langs": [
    "en"
  ],
  "facets": [
    {
      "index": {
        "byteEnd": 254,
        "byteStart": 228
      },
      "features": [
        {
          "uri": "https://github.com/Infini-AI-Lab/MagicPIG",
          "$type": "app.bsky.richtext.facet#link"
        }
      ]
    }
  ],
  "createdAt": "2024-11-27T20:52:10.417Z"
}
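
The facets entry in this record anchors the trailing "github.com/Infini-AI-La..." text to the full GitHub URL using byte offsets (byteStart 228, byteEnd 254) into the UTF-8 encoding of the text field, not UTF-16 string positions. Below is a minimal TypeScript sketch of how a client might resolve that facet; the record literal copies only the text and facets fields from the record above, and the variable names are illustrative.

// Minimal sketch: resolve the richtext link facet from the record above.
// Only the `text` and `facets` fields are needed; values are copied verbatim.
const record = {
  text:
    "MagicPIG: LSH Sampling for Efficient LLM Generation\n\n" +
    "This repo is for exploring the possibility of GPU-CPU system powered by LSH. " +
    "Three models are supported now: llama3-8b-chat-128k, llama3-70b-chat-128k, mistral-7b-chat-512k.\n\n" +
    "github.com/Infini-AI-La...",
  facets: [
    {
      index: { byteStart: 228, byteEnd: 254 },
      features: [
        {
          $type: "app.bsky.richtext.facet#link",
          uri: "https://github.com/Infini-AI-Lab/MagicPIG",
        },
      ],
    },
  ],
};

// Facet offsets index into the UTF-8 bytes of the text, so encode first,
// slice the byte range, then decode the anchor span back to a string.
const bytes = new TextEncoder().encode(record.text);
for (const facet of record.facets) {
  const anchor = new TextDecoder().decode(
    bytes.slice(facet.index.byteStart, facet.index.byteEnd),
  );
  for (const feature of facet.features) {
    if (feature.$type === "app.bsky.richtext.facet#link") {
      // For this record: "github.com/Infini-AI-La... -> https://github.com/Infini-AI-Lab/MagicPIG"
      console.log(`${anchor} -> ${feature.uri}`);
    }
  }
}

Working in bytes rather than string indices matters once the post text contains non-ASCII characters, where UTF-8 byte offsets and JavaScript string indices diverge.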