summary refs log tree commitdiff homepage
path: root/spec/dispatch/adapter
diff options
context:
space:
mode:
Diffstat (limited to 'spec/dispatch/adapter')
-rw-r--r--  spec/dispatch/adapter/copilot_rate_limiting_spec.rb | 245
-rw-r--r--  spec/dispatch/adapter/copilot_spec.rb               | 611
-rw-r--r--  spec/dispatch/adapter/errors_spec.rb                |   8
-rw-r--r--  spec/dispatch/adapter/rate_limiter_spec.rb          | 407
4 files changed, 976 insertions(+), 295 deletions(-)
diff --git a/spec/dispatch/adapter/copilot_rate_limiting_spec.rb b/spec/dispatch/adapter/copilot_rate_limiting_spec.rb
new file mode 100644
index 0000000..abd21ee
--- /dev/null
+++ b/spec/dispatch/adapter/copilot_rate_limiting_spec.rb
@@ -0,0 +1,245 @@
+# frozen_string_literal: true
+
+require "webmock/rspec"
+require "fileutils"
+require "tmpdir"
+
+RSpec.describe Dispatch::Adapter::Copilot, "rate limiting" do
+ let(:copilot_token) { "cop_test_token_abc" }
+ let(:github_token) { "gho_test_github_token" }
+ let(:tmpdir) { Dir.mktmpdir("copilot_rate_limit_test") }
+ let(:token_path) { File.join(tmpdir, "copilot_github_token") }
+
+ let(:chat_response_body) do
+ JSON.generate({
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ })
+ end
+
+ let(:messages) { [Dispatch::Adapter::Message.new(role: "user", content: "Hi")] }
+
+ before do
+ stub_request(:get, "https://api.github.com/copilot_internal/v2/token")
+ .with(headers: { "Authorization" => "token #{github_token}" })
+ .to_return(
+ status: 200,
+ body: JSON.generate({
+ "token" => copilot_token,
+ "expires_at" => (Time.now.to_i + 3600)
+ }),
+ headers: { "Content-Type" => "application/json" }
+ )
+
+ stub_request(:post, "https://api.githubcopilot.com/chat/completions")
+ .to_return(
+ status: 200,
+ body: chat_response_body,
+ headers: { "Content-Type" => "application/json" }
+ )
+ end
+
+ after { FileUtils.rm_rf(tmpdir) }
+
+ describe "constructor rate limit parameters" do
+ it "accepts default rate limit parameters" do
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ expect(adapter).to be_a(described_class)
+ end
+
+ it "accepts custom min_request_interval" do
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ min_request_interval: 5.0
+ )
+ expect(adapter).to be_a(described_class)
+ end
+
+ it "accepts nil min_request_interval to disable cooldown" do
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ min_request_interval: nil
+ )
+ expect(adapter).to be_a(described_class)
+ end
+
+ it "accepts rate_limit hash for sliding window" do
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ rate_limit: { requests: 10, period: 60 }
+ )
+ expect(adapter).to be_a(described_class)
+ end
+
+ it "raises ArgumentError for invalid min_request_interval" do
+ expect do
+ described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ min_request_interval: -1
+ )
+ end.to raise_error(ArgumentError)
+ end
+
+ it "raises ArgumentError for invalid rate_limit hash" do
+ expect do
+ described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ rate_limit: { requests: 0, period: 60 }
+ )
+ end.to raise_error(ArgumentError)
+ end
+ end
+
+ describe "#chat with rate limiting" do
+ context "with default 3s cooldown" do
+ let(:adapter) do
+ described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ end
+
+ it "does not sleep on the first request" do
+ rate_limiter = instance_double(Dispatch::Adapter::RateLimiter)
+ allow(Dispatch::Adapter::RateLimiter).to receive(:new).and_return(rate_limiter)
+ allow(rate_limiter).to receive(:wait!)
+
+ fresh_adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ fresh_adapter.chat(messages)
+
+ expect(rate_limiter).to have_received(:wait!).once
+ end
+
+ it "calls wait! before every chat request" do
+ rate_limiter = instance_double(Dispatch::Adapter::RateLimiter)
+ allow(Dispatch::Adapter::RateLimiter).to receive(:new).and_return(rate_limiter)
+ allow(rate_limiter).to receive(:wait!)
+
+ fresh_adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ fresh_adapter.chat(messages)
+ fresh_adapter.chat(messages)
+ fresh_adapter.chat(messages)
+
+ expect(rate_limiter).to have_received(:wait!).exactly(3).times
+ end
+ end
+
+ context "with rate limiting disabled" do
+ let(:adapter) do
+ described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ min_request_interval: nil,
+ rate_limit: nil
+ )
+ end
+
+ it "does not sleep between rapid requests" do
+ rate_limiter = instance_double(Dispatch::Adapter::RateLimiter)
+ allow(Dispatch::Adapter::RateLimiter).to receive(:new).and_return(rate_limiter)
+ allow(rate_limiter).to receive(:wait!)
+
+ fresh_adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path,
+ min_request_interval: nil,
+ rate_limit: nil
+ )
+ fresh_adapter.chat(messages)
+ fresh_adapter.chat(messages)
+
+ expect(rate_limiter).to have_received(:wait!).twice
+ end
+ end
+ end
+
+ describe "#chat streaming with rate limiting" do
+ it "calls wait! before a streaming request" do
+ sse_body = [
+ "data: #{JSON.generate({ "choices" => [{ "delta" => { "content" => "hi" }, "index" => 0 }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 } })}\n\n",
+ "data: [DONE]\n\n"
+ ].join
+
+ stub_request(:post, "https://api.githubcopilot.com/chat/completions")
+ .to_return(status: 200, body: sse_body, headers: { "Content-Type" => "text/event-stream" })
+
+ rate_limiter = instance_double(Dispatch::Adapter::RateLimiter)
+ allow(Dispatch::Adapter::RateLimiter).to receive(:new).and_return(rate_limiter)
+ allow(rate_limiter).to receive(:wait!)
+
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ adapter.chat(messages, stream: true) { |_| }
+
+ expect(rate_limiter).to have_received(:wait!).once
+ end
+ end
+
+ describe "#list_models with rate limiting" do
+ it "calls wait! before list_models request" do
+ stub_request(:get, "https://api.githubcopilot.com/v1/models")
+ .to_return(
+ status: 200,
+ body: JSON.generate({ "data" => [{ "id" => "gpt-4.1", "object" => "model" }] }),
+ headers: { "Content-Type" => "application/json" }
+ )
+
+ rate_limiter = instance_double(Dispatch::Adapter::RateLimiter)
+ allow(Dispatch::Adapter::RateLimiter).to receive(:new).and_return(rate_limiter)
+ allow(rate_limiter).to receive(:wait!)
+
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ adapter.list_models
+
+ expect(rate_limiter).to have_received(:wait!).once
+ end
+ end
+
+ describe "rate limit file location" do
+ it "stores the rate limit file in the same directory as the token file" do
+ adapter = described_class.new(
+ model: "gpt-4.1",
+ github_token: github_token,
+ token_path: token_path
+ )
+ adapter.chat(messages)
+
+ rate_limit_path = File.join(tmpdir, "copilot_rate_limit")
+ expect(File.exist?(rate_limit_path)).to be(true)
+ end
+ end
+end
diff --git a/spec/dispatch/adapter/copilot_spec.rb b/spec/dispatch/adapter/copilot_spec.rb
index 13c37be..61766bf 100644
--- a/spec/dispatch/adapter/copilot_spec.rb
+++ b/spec/dispatch/adapter/copilot_spec.rb
@@ -21,9 +21,9 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "token" => copilot_token,
- "expires_at" => (Time.now.to_i + 3600)
- }),
+ "token" => copilot_token,
+ "expires_at" => (Time.now.to_i + 3600)
+ }),
headers: { "Content-Type" => "application/json" }
)
end
@@ -36,7 +36,7 @@ RSpec.describe Dispatch::Adapter::Copilot do
describe "VERSION" do
it "is accessible" do
- expect(Dispatch::Adapter::Copilot::VERSION).to eq("0.1.0")
+ expect(Dispatch::Adapter::Copilot::VERSION).to eq("0.2.0")
end
end
@@ -70,15 +70,15 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "id" => "chatcmpl-123",
- "model" => "gpt-4.1",
- "choices" => [{
- "index" => 0,
- "message" => { "role" => "assistant", "content" => "Hello there!" },
- "finish_reason" => "stop"
- }],
- "usage" => { "prompt_tokens" => 10, "completion_tokens" => 5 }
- }),
+ "id" => "chatcmpl-123",
+ "model" => "gpt-4.1",
+ "choices" => [{
+ "index" => 0,
+ "message" => { "role" => "assistant", "content" => "Hello there!" },
+ "finish_reason" => "stop"
+ }],
+ "usage" => { "prompt_tokens" => 10, "completion_tokens" => 5 }
+ }),
headers: { "Content-Type" => "application/json" }
)
end
@@ -103,26 +103,26 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "id" => "chatcmpl-456",
- "model" => "gpt-4.1",
- "choices" => [{
- "index" => 0,
- "message" => {
- "role" => "assistant",
- "content" => nil,
- "tool_calls" => [{
- "id" => "call_abc",
- "type" => "function",
- "function" => {
- "name" => "get_weather",
- "arguments" => '{"city":"New York"}'
- }
- }]
- },
- "finish_reason" => "tool_calls"
- }],
- "usage" => { "prompt_tokens" => 15, "completion_tokens" => 10 }
- }),
+ "id" => "chatcmpl-456",
+ "model" => "gpt-4.1",
+ "choices" => [{
+ "index" => 0,
+ "message" => {
+ "role" => "assistant",
+ "content" => nil,
+ "tool_calls" => [{
+ "id" => "call_abc",
+ "type" => "function",
+ "function" => {
+ "name" => "get_weather",
+ "arguments" => '{"city":"New York"}'
+ }
+ }]
+ },
+ "finish_reason" => "tool_calls"
+ }],
+ "usage" => { "prompt_tokens" => 15, "completion_tokens" => 10 }
+ }),
headers: { "Content-Type" => "application/json" }
)
end
@@ -149,28 +149,28 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{
- "index" => 0,
- "message" => {
- "role" => "assistant",
- "content" => nil,
- "tool_calls" => [
- {
- "id" => "call_1",
- "type" => "function",
- "function" => { "name" => "get_weather", "arguments" => '{"city":"NYC"}' }
- },
- {
- "id" => "call_2",
- "type" => "function",
- "function" => { "name" => "get_time", "arguments" => '{"timezone":"EST"}' }
- }
- ]
- },
- "finish_reason" => "tool_calls"
- }],
- "usage" => { "prompt_tokens" => 20, "completion_tokens" => 15 }
- }),
+ "choices" => [{
+ "index" => 0,
+ "message" => {
+ "role" => "assistant",
+ "content" => nil,
+ "tool_calls" => [
+ {
+ "id" => "call_1",
+ "type" => "function",
+ "function" => { "name" => "get_weather", "arguments" => '{"city":"NYC"}' }
+ },
+ {
+ "id" => "call_2",
+ "type" => "function",
+ "function" => { "name" => "get_time", "arguments" => '{"timezone":"EST"}' }
+ }
+ ]
+ },
+ "finish_reason" => "tool_calls"
+ }],
+ "usage" => { "prompt_tokens" => 20, "completion_tokens" => 15 }
+ }),
headers: { "Content-Type" => "application/json" }
)
end
@@ -194,26 +194,26 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "id" => "chatcmpl-789",
- "model" => "gpt-4.1",
- "choices" => [{
- "index" => 0,
- "message" => {
- "role" => "assistant",
- "content" => "Let me check that for you.",
- "tool_calls" => [{
- "id" => "call_def",
- "type" => "function",
- "function" => {
- "name" => "search",
- "arguments" => '{"query":"Ruby gems"}'
- }
- }]
- },
- "finish_reason" => "tool_calls"
- }],
- "usage" => { "prompt_tokens" => 20, "completion_tokens" => 15 }
- }),
+ "id" => "chatcmpl-789",
+ "model" => "gpt-4.1",
+ "choices" => [{
+ "index" => 0,
+ "message" => {
+ "role" => "assistant",
+ "content" => "Let me check that for you.",
+ "tool_calls" => [{
+ "id" => "call_def",
+ "type" => "function",
+ "function" => {
+ "name" => "search",
+ "arguments" => '{"query":"Ruby gems"}'
+ }
+ }]
+ },
+ "finish_reason" => "tool_calls"
+ }],
+ "usage" => { "prompt_tokens" => 20, "completion_tokens" => 15 }
+ }),
headers: { "Content-Type" => "application/json" }
)
end
@@ -231,16 +231,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
context "with system: parameter" do
it "prepends system message in the wire format" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["messages"].first == { "role" => "system", "content" => "You are helpful." }
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["messages"].first == { "role" => "system", "content" => "You are helpful." }
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "OK" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "OK" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -254,16 +254,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
context "with max_tokens: per-call override" do
it "uses per-call max_tokens over constructor default" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["max_tokens"] == 100
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["max_tokens"] == 100
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "short" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "short" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -275,16 +275,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "uses constructor default when max_tokens not specified" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["max_tokens"] == 4096
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["max_tokens"] == 4096
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -304,23 +304,23 @@ RSpec.describe Dispatch::Adapter::Copilot do
)
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["tools"] == [{
- "type" => "function",
- "function" => {
- "name" => "get_weather",
- "description" => "Get weather for a city",
- "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
- }
- }]
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["tools"] == [{
+ "type" => "function",
+ "function" => {
+ "name" => "get_weather",
+ "description" => "Get weather for a city",
+ "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
+ }
+ }]
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -338,23 +338,23 @@ RSpec.describe Dispatch::Adapter::Copilot do
}
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["tools"] == [{
- "type" => "function",
- "function" => {
- "name" => "get_weather",
- "description" => "Get weather for a city",
- "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
- }
- }]
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["tools"] == [{
+ "type" => "function",
+ "function" => {
+ "name" => "get_weather",
+ "description" => "Get weather for a city",
+ "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
+ }
+ }]
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -372,23 +372,23 @@ RSpec.describe Dispatch::Adapter::Copilot do
}
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["tools"] == [{
- "type" => "function",
- "function" => {
- "name" => "get_weather",
- "description" => "Get weather for a city",
- "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
- }
- }]
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["tools"] == [{
+ "type" => "function",
+ "function" => {
+ "name" => "get_weather",
+ "description" => "Get weather for a city",
+ "parameters" => { "type" => "object", "properties" => { "city" => { "type" => "string" } } }
+ }
+ }]
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -411,18 +411,18 @@ RSpec.describe Dispatch::Adapter::Copilot do
}
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["tools"].size == 2 &&
- body["tools"][0]["function"]["name"] == "get_weather" &&
- body["tools"][1]["function"]["name"] == "get_time"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["tools"].size == 2 &&
+ body["tools"][0]["function"]["name"] == "get_weather" &&
+ body["tools"][1]["function"]["name"] == "get_time"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -434,16 +434,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "does not include tools key when tools array is empty" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- !body.key?("tools")
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ !body.key?("tools")
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -470,26 +470,27 @@ RSpec.describe Dispatch::Adapter::Copilot do
]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- # user message
- msgs[0]["role"] == "user" &&
- # assistant with tool_calls
- msgs[1]["role"] == "assistant" &&
- msgs[1]["tool_calls"].is_a?(Array) &&
- msgs[1]["tool_calls"][0]["id"] == "call_1" &&
- # tool result
- msgs[2]["role"] == "tool" &&
- msgs[2]["tool_call_id"] == "call_1" &&
- msgs[2]["content"] == "72F and sunny"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ # user message
+ msgs[0]["role"] == "user" &&
+ # assistant with tool_calls
+ msgs[1]["role"] == "assistant" &&
+ msgs[1]["tool_calls"].is_a?(Array) &&
+ msgs[1]["tool_calls"][0]["id"] == "call_1" &&
+ # tool result
+ msgs[2]["role"] == "tool" &&
+ msgs[2]["tool_call_id"] == "call_1" &&
+ msgs[2]["content"] == "72F and sunny"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "It's 72F and sunny in NYC!" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 20, "completion_tokens" => 10 }
- }),
+ "choices" => [{ "message" => { "content" => "It's 72F and sunny in NYC!" },
+ "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 20, "completion_tokens" => 10 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -516,17 +517,17 @@ RSpec.describe Dispatch::Adapter::Copilot do
messages = [Dispatch::Adapter::Message.new(role: "user", content: text_blocks)]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- msgs[0]["content"] == "First paragraph.\nSecond paragraph."
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ msgs[0]["content"] == "First paragraph.\nSecond paragraph."
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -555,18 +556,18 @@ RSpec.describe Dispatch::Adapter::Copilot do
]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- tool_msg = msgs.find { |m| m["role"] == "tool" }
- tool_msg && tool_msg["content"] == "Result line 1\nResult line 2"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ tool_msg = msgs.find { |m| m["role"] == "tool" }
+ tool_msg && tool_msg["content"] == "Result line 1\nResult line 2"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 10, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 10, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -591,18 +592,19 @@ RSpec.describe Dispatch::Adapter::Copilot do
]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- tool_msg = msgs.find { |m| m["role"] == "tool" }
- tool_msg && tool_msg["content"] == "Something went wrong" && tool_msg["tool_call_id"] == "call_err"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ tool_msg = msgs.find { |m| m["role"] == "tool" }
+ tool_msg && tool_msg["content"] == "Something went wrong" && tool_msg["tool_call_id"] == "call_err"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "I see the error" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 10, "completion_tokens" => 3 }
- }),
+ "choices" => [{ "message" => { "content" => "I see the error" },
+ "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 10, "completion_tokens" => 3 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -617,12 +619,12 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{
- "message" => { "content" => "truncated output..." },
- "finish_reason" => "length"
- }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 100 }
- }),
+ "choices" => [{
+ "message" => { "content" => "truncated output..." },
+ "finish_reason" => "length"
+ }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 100 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -647,21 +649,21 @@ RSpec.describe Dispatch::Adapter::Copilot do
]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- assistant = msgs.find { |m| m["role"] == "assistant" }
- assistant &&
- assistant["content"] == "Checking..." &&
- assistant["tool_calls"].is_a?(Array) &&
- assistant["tool_calls"][0]["id"] == "call_mixed"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ assistant = msgs.find { |m| m["role"] == "assistant" }
+ assistant &&
+ assistant["content"] == "Checking..." &&
+ assistant["tool_calls"].is_a?(Array) &&
+ assistant["tool_calls"][0]["id"] == "call_mixed"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 10, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 10, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -678,17 +680,17 @@ RSpec.describe Dispatch::Adapter::Copilot do
]
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- msgs = body["messages"]
- msgs.size == 1 && msgs[0]["role"] == "user" && msgs[0]["content"].include?("First") && msgs[0]["content"].include?("Second")
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ msgs = body["messages"]
+ msgs.size == 1 && msgs[0]["role"] == "user" && msgs[0]["content"].include?("First") && msgs[0]["content"].include?("Second")
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -700,16 +702,17 @@ RSpec.describe Dispatch::Adapter::Copilot do
context "with thinking: parameter" do
it "sends reasoning_effort in the request body" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["reasoning_effort"] == "high"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["reasoning_effort"] == "high"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "thought deeply" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 3 }
- }),
+ "choices" => [{ "message" => { "content" => "thought deeply" },
+ "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 3 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -728,16 +731,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
)
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["reasoning_effort"] == "medium"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["reasoning_effort"] == "medium"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -756,16 +759,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
)
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- body["reasoning_effort"] == "low"
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ body["reasoning_effort"] == "low"
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -777,16 +780,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "does not send reasoning_effort when thinking is nil" do
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- !body.key?("reasoning_effort")
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ !body.key?("reasoning_effort")
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -797,16 +800,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
end
it "raises ArgumentError for invalid thinking level" do
- expect {
+ expect do
described_class.new(model: "o3", github_token: github_token, thinking: "extreme")
- }.to raise_error(ArgumentError, /Invalid thinking level/)
+ end.to raise_error(ArgumentError, /Invalid thinking level/)
end
it "raises ArgumentError for invalid per-call thinking level" do
messages = [Dispatch::Adapter::Message.new(role: "user", content: "Hi")]
- expect {
+ expect do
adapter.chat(messages, thinking: "extreme")
- }.to raise_error(ArgumentError, /Invalid thinking level/)
+ end.to raise_error(ArgumentError, /Invalid thinking level/)
end
it "allows disabling constructor default with nil per-call" do
@@ -818,16 +821,16 @@ RSpec.describe Dispatch::Adapter::Copilot do
)
stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .with { |req|
- body = JSON.parse(req.body)
- !body.key?("reasoning_effort")
- }
+ .with do |req|
+ body = JSON.parse(req.body)
+ !body.key?("reasoning_effort")
+ end
.to_return(
status: 200,
body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
+ "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
headers: { "Content-Type" => "application/json" }
)
@@ -844,7 +847,8 @@ RSpec.describe Dispatch::Adapter::Copilot do
sse_body = [
"data: #{JSON.generate({ "choices" => [{ "delta" => { "content" => "Hello" }, "index" => 0 }] })}\n\n",
"data: #{JSON.generate({ "choices" => [{ "delta" => { "content" => " world" }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "stop" }], "usage" => { "prompt_tokens" => 5, "completion_tokens" => 2 } })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 2 } })}\n\n",
"data: [DONE]\n\n"
].join
@@ -873,10 +877,20 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "yields tool_use_start and tool_use_delta for tool call streams" do
sse_body = [
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 0, "id" => "call_1", "type" => "function", "function" => { "name" => "search", "arguments" => "" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 0, "function" => { "arguments" => "{\"q\":" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 0, "function" => { "arguments" => "\"test\"}" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "tool_calls" }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 0, "id" => "call_1", "type" => "function",
+ "function" => { "name" => "search", "arguments" => "" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 0,
+ "function" => { "arguments" => "{\"q\":" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 0,
+ "function" => { "arguments" => "\"test\"}" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0,
+ "finish_reason" => "tool_calls" }] })}\n\n",
"data: [DONE]\n\n"
].join
@@ -909,7 +923,8 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "captures usage from streaming response" do
sse_body = [
"data: #{JSON.generate({ "choices" => [{ "delta" => { "content" => "hi" }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "stop" }], "usage" => { "prompt_tokens" => 42, "completion_tokens" => 7 } })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 42, "completion_tokens" => 7 } })}\n\n",
"data: [DONE]\n\n"
].join
@@ -929,11 +944,24 @@ RSpec.describe Dispatch::Adapter::Copilot do
it "handles multiple parallel tool calls in a stream" do
sse_body = [
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 0, "id" => "call_a", "type" => "function", "function" => { "name" => "tool_a", "arguments" => "" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 1, "id" => "call_b", "type" => "function", "function" => { "name" => "tool_b", "arguments" => "" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 0, "function" => { "arguments" => "{\"x\":1}" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => { "tool_calls" => [{ "index" => 1, "function" => { "arguments" => "{\"y\":2}" } }] }, "index" => 0 }] })}\n\n",
- "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0, "finish_reason" => "tool_calls" }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 0, "id" => "call_a", "type" => "function",
+ "function" => { "name" => "tool_a", "arguments" => "" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 1, "id" => "call_b", "type" => "function",
+ "function" => { "name" => "tool_b", "arguments" => "" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 0,
+ "function" => { "arguments" => "{\"x\":1}" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{
+ "delta" => { "tool_calls" => [{ "index" => 1,
+ "function" => { "arguments" => "{\"y\":2}" } }] }, "index" => 0
+ }] })}\n\n",
+ "data: #{JSON.generate({ "choices" => [{ "delta" => {}, "index" => 0,
+ "finish_reason" => "tool_calls" }] })}\n\n",
"data: [DONE]\n\n"
].join
@@ -960,21 +988,22 @@ RSpec.describe Dispatch::Adapter::Copilot do
describe "authentication" do
it "reuses cached Copilot token for subsequent requests" do
token_stub = stub_request(:get, "https://api.github.com/copilot_internal/v2/token")
- .to_return(
- status: 200,
- body: JSON.generate({ "token" => copilot_token, "expires_at" => (Time.now.to_i + 3600) }),
- headers: { "Content-Type" => "application/json" }
- )
+ .to_return(
+ status: 200,
+ body: JSON.generate({ "token" => copilot_token, "expires_at" => (Time.now.to_i + 3600) }),
+ headers: { "Content-Type" => "application/json" }
+ )
chat_stub = stub_request(:post, "https://api.githubcopilot.com/chat/completions")
- .to_return(
- status: 200,
- body: JSON.generate({
- "choices" => [{ "message" => { "content" => "ok" }, "finish_reason" => "stop" }],
- "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
- }),
- headers: { "Content-Type" => "application/json" }
- )
+ .to_return(
+ status: 200,
+ body: JSON.generate({
+ "choices" => [{ "message" => { "content" => "ok" },
+ "finish_reason" => "stop" }],
+ "usage" => { "prompt_tokens" => 5, "completion_tokens" => 1 }
+ }),
+ headers: { "Content-Type" => "application/json" }
+ )
messages = [Dispatch::Adapter::Message.new(role: "user", content: "Hi")]
adapter.chat(messages)
@@ -1007,11 +1036,11 @@ RSpec.describe Dispatch::Adapter::Copilot do
.to_return(
status: 200,
body: JSON.generate({
- "data" => [
- { "id" => "gpt-4.1", "object" => "model" },
- { "id" => "gpt-4o", "object" => "model" }
- ]
- }),
+ "data" => [
+ { "id" => "gpt-4.1", "object" => "model" },
+ { "id" => "gpt-4o", "object" => "model" }
+ ]
+ }),
headers: { "Content-Type" => "application/json" }
)
diff --git a/spec/dispatch/adapter/errors_spec.rb b/spec/dispatch/adapter/errors_spec.rb
index 77c8389..6893ac8 100644
--- a/spec/dispatch/adapter/errors_spec.rb
+++ b/spec/dispatch/adapter/errors_spec.rb
@@ -19,9 +19,9 @@ RSpec.describe Dispatch::Adapter::Error do
end
it "can be rescued as StandardError" do
- expect {
+ expect do
raise described_class.new("test")
- }.to raise_error(StandardError)
+ end.to raise_error(StandardError)
end
end
@@ -44,9 +44,9 @@ RSpec.describe Dispatch::Adapter::RateLimitError do
end
it "is rescuable as Dispatch::Adapter::Error" do
- expect {
+ expect do
raise described_class.new("rate limited")
- }.to raise_error(Dispatch::Adapter::Error)
+ end.to raise_error(Dispatch::Adapter::Error)
end
end
diff --git a/spec/dispatch/adapter/rate_limiter_spec.rb b/spec/dispatch/adapter/rate_limiter_spec.rb
new file mode 100644
index 0000000..5fcf92f
--- /dev/null
+++ b/spec/dispatch/adapter/rate_limiter_spec.rb
@@ -0,0 +1,407 @@
+# frozen_string_literal: true
+
+require "fileutils"
+require "json"
+require "tmpdir"
+
+RSpec.describe Dispatch::Adapter::RateLimiter do
+ let(:tmpdir) { Dir.mktmpdir("rate_limiter_test") }
+ let(:rate_limit_path) { File.join(tmpdir, "copilot_rate_limit") }
+
+ after { FileUtils.rm_rf(tmpdir) }
+
+ describe "#initialize" do
+ it "accepts valid min_request_interval and nil rate_limit" do
+ limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 3.0,
+ rate_limit: nil
+ )
+ expect(limiter).to be_a(described_class)
+ end
+
+ it "accepts nil min_request_interval" do
+ limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: nil
+ )
+ expect(limiter).to be_a(described_class)
+ end
+
+ it "accepts zero min_request_interval" do
+ limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 0,
+ rate_limit: nil
+ )
+ expect(limiter).to be_a(described_class)
+ end
+
+ it "accepts valid rate_limit hash" do
+ limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 10, period: 60 }
+ )
+ expect(limiter).to be_a(described_class)
+ end
+
+ it "accepts both min_request_interval and rate_limit" do
+ limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 2.0,
+ rate_limit: { requests: 5, period: 30 }
+ )
+ expect(limiter).to be_a(described_class)
+ end
+
+ it "raises ArgumentError for negative min_request_interval" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: -1,
+ rate_limit: nil
+ )
+ end.to raise_error(ArgumentError, /min_request_interval/)
+ end
+
+ it "raises ArgumentError for non-numeric min_request_interval" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: "fast",
+ rate_limit: nil
+ )
+ end.to raise_error(ArgumentError, /min_request_interval/)
+ end
+
+ it "raises ArgumentError when rate_limit is missing requests key" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { period: 60 }
+ )
+ end.to raise_error(ArgumentError, /requests/)
+ end
+
+ it "raises ArgumentError when rate_limit is missing period key" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 10 }
+ )
+ end.to raise_error(ArgumentError, /period/)
+ end
+
+ it "raises ArgumentError when rate_limit requests is zero" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 0, period: 60 }
+ )
+ end.to raise_error(ArgumentError, /requests/)
+ end
+
+ it "raises ArgumentError when rate_limit requests is negative" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: -1, period: 60 }
+ )
+ end.to raise_error(ArgumentError, /requests/)
+ end
+
+ it "raises ArgumentError when rate_limit period is zero" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 10, period: 0 }
+ )
+ end.to raise_error(ArgumentError, /period/)
+ end
+
+ it "raises ArgumentError when rate_limit period is negative" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 10, period: -5 }
+ )
+ end.to raise_error(ArgumentError, /period/)
+ end
+
+ it "raises ArgumentError when rate_limit is not a Hash" do
+ expect do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: "10/60"
+ )
+ end.to raise_error(ArgumentError)
+ end
+ end
+
+ describe "#wait!" do
+ context "with both mechanisms disabled" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: nil
+ )
+ end
+
+ it "returns immediately without sleeping" do
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "does not create a rate limit file" do
+ limiter.wait!
+ expect(File.exist?(rate_limit_path)).to be(false)
+ end
+ end
+
+ context "with per-request cooldown only" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 1.0,
+ rate_limit: nil
+ )
+ end
+
+ it "does not sleep on the first request" do
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "creates the rate limit file on first request" do
+ limiter.wait!
+ expect(File.exist?(rate_limit_path)).to be(true)
+ end
+
+ it "sets the rate limit file permissions to 0600" do
+ limiter.wait!
+ mode = File.stat(rate_limit_path).mode & 0o777
+ expect(mode).to eq(0o600)
+ end
+
+ it "records last_request_at in the state file" do
+ before = Time.now.to_f
+ limiter.wait!
+ after = Time.now.to_f
+
+ state = JSON.parse(File.read(rate_limit_path))
+ expect(state["last_request_at"]).to be_between(before, after)
+ end
+
+ it "sleeps for the remaining cooldown on a rapid second request" do
+ limiter.wait!
+
+ # Second call follows immediately, so the cooldown cannot have elapsed
+ expect(limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ limiter.wait!
+ end
+
+ it "does not sleep when enough time has elapsed between requests" do
+ limiter.wait!
+
+ # Write a past timestamp to simulate time passing
+ state = { "last_request_at" => Time.now.to_f - 2.0, "request_log" => [] }
+ File.write(rate_limit_path, JSON.generate(state))
+
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+ end
+
+ context "with sliding window only" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: nil,
+ rate_limit: { requests: 3, period: 10 }
+ )
+ end
+
+ it "allows requests up to the window limit without sleeping" do
+ expect(limiter).not_to receive(:sleep)
+ 3.times { limiter.wait! }
+ end
+
+ it "sleeps when the window limit is reached" do
+ now = Time.now.to_f
+ state = {
+ "last_request_at" => now,
+ "request_log" => [now - 2.0, now - 1.0, now]
+ }
+ File.write(rate_limit_path, JSON.generate(state))
+
+ expect(limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ limiter.wait!
+ end
+
+ it "does not sleep when oldest entries have expired from the window" do
+ now = Time.now.to_f
+ state = {
+ "last_request_at" => now - 5.0,
+ "request_log" => [now - 15.0, now - 12.0, now - 5.0]
+ }
+ File.write(rate_limit_path, JSON.generate(state))
+
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "prunes expired entries from the request_log on write" do
+ now = Time.now.to_f
+ state = {
+ "last_request_at" => now - 5.0,
+ "request_log" => [now - 20.0, now - 15.0, now - 5.0]
+ }
+ File.write(rate_limit_path, JSON.generate(state))
+
+ limiter.wait!
+
+ updated_state = JSON.parse(File.read(rate_limit_path))
+ # Old entries (20s and 15s ago) should be pruned (window is 10s)
+ # Only the 5s-ago entry and the new entry should remain
+ expect(updated_state["request_log"].size).to be <= 2
+ end
+ end
+
+ context "with both mechanisms enabled" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 1.0,
+ rate_limit: { requests: 3, period: 10 }
+ )
+ end
+
+ it "uses the longer wait time when cooldown is the bottleneck" do
+ limiter.wait!
+
+ # Second request immediately — cooldown should be the bottleneck
+ # (only 1 of 3 window slots used, but cooldown not elapsed)
+ expect(limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ limiter.wait!
+ end
+
+ it "uses the longer wait time when window limit is the bottleneck" do
+ now = Time.now.to_f
+ state = {
+ "last_request_at" => now - 2.0, # cooldown elapsed
+ "request_log" => [now - 3.0, now - 2.5, now - 2.0] # window full
+ }
+ File.write(rate_limit_path, JSON.generate(state))
+
+ expect(limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ limiter.wait!
+ end
+ end
+
+ context "with a missing or corrupt state file" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 1.0,
+ rate_limit: nil
+ )
+ end
+
+ it "treats a non-existent file as fresh state" do
+ expect(File.exist?(rate_limit_path)).to be(false)
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "treats an empty file as fresh state" do
+ FileUtils.mkdir_p(File.dirname(rate_limit_path))
+ File.write(rate_limit_path, "")
+
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "treats a corrupt JSON file as fresh state" do
+ FileUtils.mkdir_p(File.dirname(rate_limit_path))
+ File.write(rate_limit_path, "not valid json{{{")
+
+ expect(limiter).not_to receive(:sleep)
+ limiter.wait!
+ end
+
+ it "overwrites corrupt state with valid state after a request" do
+ FileUtils.mkdir_p(File.dirname(rate_limit_path))
+ File.write(rate_limit_path, "garbage")
+
+ limiter.wait!
+
+ state = JSON.parse(File.read(rate_limit_path))
+ expect(state).to have_key("last_request_at")
+ expect(state["last_request_at"]).to be_a(Float)
+ end
+ end
+
+ context "with a missing parent directory" do
+ let(:nested_path) { File.join(tmpdir, "sub", "dir", "copilot_rate_limit") }
+
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: nested_path,
+ min_request_interval: 1.0,
+ rate_limit: nil
+ )
+ end
+
+ it "creates parent directories" do
+ limiter.wait!
+ expect(File.exist?(nested_path)).to be(true)
+ end
+ end
+
+ context "cross-process coordination" do
+ let(:limiter) do
+ described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 1.0,
+ rate_limit: nil
+ )
+ end
+
+ it "reads state written by another process" do
+ # Simulate another process having made a request just now
+ now = Time.now.to_f
+ state = { "last_request_at" => now, "request_log" => [now] }
+ FileUtils.mkdir_p(File.dirname(rate_limit_path))
+ File.write(rate_limit_path, JSON.generate(state))
+
+ # Our limiter should see this and wait
+ expect(limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ limiter.wait!
+ end
+
+ it "writes state that another process can read" do
+ limiter.wait!
+
+ # Another RateLimiter instance (simulating another process) reads the file
+ other_limiter = described_class.new(
+ rate_limit_path: rate_limit_path,
+ min_request_interval: 1.0,
+ rate_limit: nil
+ )
+
+ expect(other_limiter).to receive(:sleep) { |duration| expect(duration).to be > 0 }
+ other_limiter.wait!
+ end
+ end
+ end
+end