feat: implement Phase 2 (Go Agent) and Phase 3 (React Frontend MVP)
Phase 2 - Go Agent Core: - gRPC client with exponential backoff reconnect logic - Command executor (PowerShell/sh cross-platform) - Proto stubs regenerated with module= option (correct output path) - gRPC upgraded to v1.79.3 (BidiStreamingClient support) Phase 3 - React Frontend MVP: - Vite + React 18 + TypeScript setup with Tailwind CSS v4 - TanStack Query for data fetching, API client + TypeScript types - Dashboard page: stats cards (agents/status/tickets) + sortable agents table - Agent detail page: CPU/RAM charts (Recharts), disk usage, shell command executor - Tickets page: CRUD with modals, filters, sortable table - Dark mode with CSS custom properties Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -1,9 +1,148 @@
|
||||
package main
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"nexusrmm.local/agent/internal/collector"
|
||||
"nexusrmm.local/agent/internal/config"
|
||||
"nexusrmm.local/agent/internal/connection"
|
||||
"nexusrmm.local/agent/internal/executor"
|
||||
pb "nexusrmm.local/agent/pkg/proto"
|
||||
)
|
||||
|
||||
// version is the agent's build version string. Defaults to "dev";
// presumably overridden at build time via -ldflags "-X main.version=..." —
// TODO confirm against the build scripts.
var version = "dev"
func main() {
|
||||
fmt.Printf("NexusRMM Agent %s\n", version)
|
||||
log.Printf("NexusRMM Agent %s starting on %s/%s", version, runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
cfg, err := config.Load("nexus-agent.yaml")
|
||||
if err != nil {
|
||||
log.Fatalf("Config load error: %v", err)
|
||||
}
|
||||
|
||||
client, err := connection.ConnectWithRetry(cfg.ServerAddress, 10)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to connect: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
if cfg.AgentID == "" {
|
||||
hostname, _ := os.Hostname()
|
||||
metrics, _ := collector.Collect()
|
||||
mac, ip := "", ""
|
||||
if len(metrics.Networks) > 0 {
|
||||
mac = metrics.Networks[0].MAC
|
||||
ip = metrics.Networks[0].IPAddress
|
||||
}
|
||||
|
||||
resp, err := client.Client.Enroll(context.Background(), &pb.EnrollRequest{
|
||||
Hostname: hostname,
|
||||
OsType: runtime.GOOS,
|
||||
OsVersion: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
|
||||
MacAddress: mac,
|
||||
IpAddress: ip,
|
||||
AgentVersion: version,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Enrollment failed: %v", err)
|
||||
}
|
||||
cfg.AgentID = resp.AgentId
|
||||
cfg.HeartbeatInterval = int(resp.HeartbeatInterval)
|
||||
if err := cfg.Save("nexus-agent.yaml"); err != nil {
|
||||
log.Printf("Warning: could not save config: %v", err)
|
||||
}
|
||||
log.Printf("Enrolled with ID: %s", cfg.AgentID)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
ticker := time.NewTicker(time.Duration(cfg.HeartbeatInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
log.Printf("Agent running. Heartbeat every %ds", cfg.HeartbeatInterval)
|
||||
doHeartbeat(ctx, client, cfg)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
doHeartbeat(ctx, client, cfg)
|
||||
case <-sigCh:
|
||||
log.Println("Shutting down...")
|
||||
return
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doHeartbeat(ctx context.Context, client *connection.GrpcClient, cfg *config.Config) {
|
||||
metrics, err := collector.Collect()
|
||||
if err != nil {
|
||||
log.Printf("Metric collection error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
req := &pb.HeartbeatRequest{
|
||||
AgentId: cfg.AgentID,
|
||||
Metrics: &pb.SystemMetrics{
|
||||
CpuUsagePercent: metrics.CPUPercent,
|
||||
MemoryUsagePercent: metrics.MemoryPercent,
|
||||
MemoryTotalBytes: int64(metrics.MemoryTotal),
|
||||
MemoryAvailableBytes: int64(metrics.MemoryAvailable),
|
||||
UptimeSeconds: metrics.UptimeSeconds,
|
||||
},
|
||||
}
|
||||
for _, d := range metrics.Disks {
|
||||
req.Metrics.Disks = append(req.Metrics.Disks, &pb.DiskInfo{
|
||||
MountPoint: d.MountPoint,
|
||||
TotalBytes: int64(d.Total),
|
||||
FreeBytes: int64(d.Free),
|
||||
Filesystem: d.Filesystem,
|
||||
})
|
||||
}
|
||||
|
||||
resp, err := client.Client.Heartbeat(ctx, req)
|
||||
if err != nil {
|
||||
log.Printf("Heartbeat error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, cmd := range resp.PendingCommands {
|
||||
log.Printf("Executing command %s (type: %v)", cmd.CommandId, cmd.Type)
|
||||
go executeCommand(ctx, client, cfg.AgentID, cmd)
|
||||
}
|
||||
}
|
||||
|
||||
func executeCommand(ctx context.Context, client *connection.GrpcClient, agentID string, cmd *pb.AgentCommand) {
|
||||
var result *executor.Result
|
||||
switch cmd.Type {
|
||||
case pb.CommandType_COMMAND_TYPE_SHELL:
|
||||
result = executor.Execute(ctx, cmd.Payload, 300)
|
||||
default:
|
||||
result = &executor.Result{ExitCode: -1, Stderr: fmt.Sprintf("unknown command type: %v", cmd.Type)}
|
||||
}
|
||||
|
||||
if err := func() error {
|
||||
_, err := client.Client.ReportCommandResult(ctx, &pb.CommandResult{
|
||||
AgentId: agentID,
|
||||
CommandId: cmd.CommandId,
|
||||
ExitCode: int32(result.ExitCode),
|
||||
Stdout: result.Stdout,
|
||||
Stderr: result.Stderr,
|
||||
Success: result.Success,
|
||||
})
|
||||
return err
|
||||
}(); err != nil {
|
||||
log.Printf("Failed to report result for %s: %v", cmd.CommandId, err)
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user