Compare commits: main...a33ef47a39 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | a33ef47a39 | |
| | 8c02b63dc7 | |
@@ -12,7 +12,7 @@ import (
 type DistributedVM struct {
     nodeID       string
     cluster      *ClusterManager
-    localRuntime Runtime // Interface to avoid import cycles
+    localRuntime Runtime
     sharding     *ShardManager
     discovery    *NodeDiscovery
     natsConn     *nats.Conn
@@ -20,17 +20,29 @@ type DistributedVM struct {
     cancel       context.CancelFunc
 }

-// Runtime interface to avoid import cycles with main aether package
+// Runtime defines the interface for a local runtime that executes actors.
+// This interface decouples the cluster package from specific runtime implementations.
 type Runtime interface {
+    // Start initializes and starts the runtime
     Start() error
-    LoadModel(model interface{}) error
-    SendMessage(message interface{}) error
+    // LoadModel loads an EventStorming model into the runtime
+    LoadModel(model RuntimeModel) error
+    // SendMessage sends a message to an actor in the runtime
+    SendMessage(message RuntimeMessage) error
 }

-// DistributedVMRegistry implements VMRegistry using DistributedVM's local runtime and sharding
+// DistributedVMRegistry implements VMRegistry using DistributedVM's local runtime and sharding.
+// It provides the cluster manager with access to VM information without import cycles.
 type DistributedVMRegistry struct {
-    runtime  interface{} // Runtime interface to avoid import cycles
+    vmProvider VMProvider
     sharding *ShardManager
 }

+// VMProvider defines an interface for accessing VMs from a runtime.
+// This is used by DistributedVMRegistry to get VM information.
+type VMProvider interface {
+    // GetActiveVMs returns a map of actor IDs to their VirtualMachine instances
+    GetActiveVMs() map[string]VirtualMachine
+}
+
 // NewDistributedVM creates a distributed VM runtime cluster node
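For orientation, the sketch below (not part of this change) shows the shape of a type that would satisfy both the narrowed Runtime interface and the new VMProvider interface. The fakeRuntime name is made up, and the snippet assumes all of these declarations live in the same package, as the unqualified type references suggest.

```go
// fakeRuntime is an illustrative stand-in showing the contract a local runtime
// must meet after this change; it is not part of the PR.
type fakeRuntime struct {
	vms map[string]VirtualMachine
}

func (r *fakeRuntime) Start() error                       { return nil }
func (r *fakeRuntime) LoadModel(model RuntimeModel) error { _ = model.GetID(); return nil }
func (r *fakeRuntime) SendMessage(msg RuntimeMessage) error {
	_ = msg.GetTargetActorID()
	return nil
}
func (r *fakeRuntime) GetActiveVMs() map[string]VirtualMachine { return r.vms }

// Compile-time checks that fakeRuntime satisfies both interfaces.
var (
	_ Runtime    = (*fakeRuntime)(nil)
	_ VMProvider = (*fakeRuntime)(nil)
)
```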
@@ -67,16 +79,19 @@ func NewDistributedVM(nodeID string, natsURLs []string, localRuntime Runtime) (*
         cancel:       cancel,
     }

-    // Create VM registry and connect it to cluster manager
-    vmRegistry := &DistributedVMRegistry{
-        runtime:  localRuntime,
-        sharding: sharding,
-    }
-    cluster.SetVMRegistry(vmRegistry)
-
     return dvm, nil
 }

+// SetVMProvider sets the VM provider for the distributed VM registry.
+// This should be called after the runtime is fully initialized.
+func (dvm *DistributedVM) SetVMProvider(provider VMProvider) {
+    vmRegistry := &DistributedVMRegistry{
+        vmProvider: provider,
+        sharding:   dvm.sharding,
+    }
+    dvm.cluster.SetVMRegistry(vmRegistry)
+}
+
 // Start begins the distributed VM cluster node
 func (dvm *DistributedVM) Start() error {
     // Start local runtime
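A hypothetical wiring sequence for the new two-step setup follows. bootNode is not part of the change, and it assumes NewDistributedVM returns (*DistributedVM, error), consistent with the `return dvm, nil` shown above.

```go
// bootNode sketches the intended order: construct the node, register the VM
// provider once the runtime is fully initialized, then start the node.
func bootNode(nodeID string, natsURLs []string, rt Runtime, provider VMProvider) error {
	dvm, err := NewDistributedVM(nodeID, natsURLs, rt)
	if err != nil {
		return err
	}
	// SetVMProvider builds the DistributedVMRegistry and hands it to the cluster manager.
	dvm.SetVMProvider(provider)
	return dvm.Start()
}
```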
@@ -103,7 +118,7 @@ func (dvm *DistributedVM) Stop() {
 }

 // LoadModel distributes EventStorming model across the cluster with VM templates
-func (dvm *DistributedVM) LoadModel(model interface{}) error {
+func (dvm *DistributedVM) LoadModel(model RuntimeModel) error {
     // Load model locally first
     if err := dvm.localRuntime.LoadModel(model); err != nil {
         return fmt.Errorf("failed to load model locally: %w", err)
@@ -121,7 +136,7 @@ func (dvm *DistributedVM) LoadModel(model interface{}) error {
 }

 // SendMessage routes messages across the distributed cluster
-func (dvm *DistributedVM) SendMessage(message interface{}) error {
+func (dvm *DistributedVM) SendMessage(message RuntimeMessage) error {
     // This is a simplified implementation
     // In practice, this would determine the target node based on sharding
     // and route the message appropriately
@@ -162,15 +177,29 @@ func (dvm *DistributedVM) handleClusterMessage(msg *nats.Msg) {
     switch clusterMsg.Type {
     case "load_model":
         // Handle model loading from other nodes
-        if model := clusterMsg.Payload; model != nil {
-            dvm.localRuntime.LoadModel(model)
+        // Re-marshal and unmarshal to convert map[string]interface{} to concrete type
+        payloadBytes, err := json.Marshal(clusterMsg.Payload)
+        if err != nil {
+            return
         }
+        var model ModelPayload
+        if err := json.Unmarshal(payloadBytes, &model); err != nil {
+            return
+        }
+        dvm.localRuntime.LoadModel(&model)

     case "route_message":
         // Handle message routing from other nodes
-        if message := clusterMsg.Payload; message != nil {
-            dvm.localRuntime.SendMessage(message)
+        // Re-marshal and unmarshal to convert map[string]interface{} to concrete type
+        payloadBytes, err := json.Marshal(clusterMsg.Payload)
+        if err != nil {
+            return
         }
+        var message MessagePayload
+        if err := json.Unmarshal(payloadBytes, &message); err != nil {
+            return
+        }
+        dvm.localRuntime.SendMessage(&message)

     case "rebalance":
         // Handle shard rebalancing requests
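The marshal/unmarshal round trip above exists because a JSON payload decoded into an interface{} arrives as map[string]interface{}. A minimal sketch of the conversion (example values are made up; assumes encoding/json is imported, as the diff's json calls imply):

```go
// exampleConvertPayload shows how a generic payload becomes a concrete ModelPayload.
func exampleConvertPayload(payload interface{}) (*ModelPayload, error) {
	raw, err := json.Marshal(payload) // back to JSON bytes
	if err != nil {
		return nil, err
	}
	var model ModelPayload
	if err := json.Unmarshal(raw, &model); err != nil {
		return nil, err
	}
	// For payload = map[string]interface{}{"id": "m1", "name": "Orders"},
	// model.GetID() == "m1" and model.GetName() == "Orders".
	return &model, nil
}
```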
@@ -200,22 +229,23 @@ func (dvm *DistributedVM) GetClusterInfo() map[string]interface{} {
     nodes := dvm.cluster.GetNodes()

     return map[string]interface{}{
         "nodeId":    dvm.nodeID,
         "isLeader":  dvm.cluster.IsLeader(),
         "leader":    dvm.cluster.GetLeader(),
         "nodeCount": len(nodes),
         "nodes":     nodes,
     }
 }

-// GetActiveVMs returns a map of active VMs (implementation depends on runtime)
-func (dvr *DistributedVMRegistry) GetActiveVMs() map[string]interface{} {
-    // This would need to access the actual runtime's VM registry
-    // For now, return empty map to avoid import cycles
-    return make(map[string]interface{})
+// GetActiveVMs returns a map of active VMs from the VM provider
+func (dvr *DistributedVMRegistry) GetActiveVMs() map[string]VirtualMachine {
+    if dvr.vmProvider == nil {
+        return make(map[string]VirtualMachine)
+    }
+    return dvr.vmProvider.GetActiveVMs()
 }

 // GetShard returns the shard number for the given actor ID
 func (dvr *DistributedVMRegistry) GetShard(actorID string) int {
     return dvr.sharding.GetShard(actorID)
 }
@@ -12,9 +12,12 @@ import (
     "github.com/nats-io/nats.go"
 )

-// VMRegistry provides access to local VM information for cluster operations
+// VMRegistry provides access to local VM information for cluster operations.
+// Implementations must provide thread-safe access to VM data.
 type VMRegistry interface {
-    GetActiveVMs() map[string]interface{} // VirtualMachine interface to avoid import cycles
+    // GetActiveVMs returns a map of actor IDs to their VirtualMachine instances
+    GetActiveVMs() map[string]VirtualMachine
+    // GetShard returns the shard number for a given actor ID
     GetShard(actorID string) int
 }

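As a usage sketch, a caller holding a VMRegistry could combine the two methods to group active actors by shard. countVMsPerShard is a hypothetical helper, not part of this change.

```go
// countVMsPerShard tallies active VMs per shard using only the VMRegistry contract.
func countVMsPerShard(reg VMRegistry) map[int]int {
	counts := make(map[int]int)
	for actorID := range reg.GetActiveVMs() {
		counts[reg.GetShard(actorID)]++
	}
	return counts
}
```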
@@ -50,13 +53,13 @@ func NewClusterManager(nodeID string, natsConn *nats.Conn, ctx context.Context)
     // Create leadership election with callbacks
     callbacks := LeaderElectionCallbacks{
         OnBecameLeader: func() {
-            cm.logger.Printf("👑 This node became the cluster leader - can initiate rebalancing")
+            cm.logger.Printf("This node became the cluster leader - can initiate rebalancing")
         },
         OnLostLeader: func() {
-            cm.logger.Printf("📉 This node lost cluster leadership")
+            cm.logger.Printf("This node lost cluster leadership")
         },
         OnNewLeader: func(leaderID string) {
-            cm.logger.Printf("🔄 Cluster leadership changed to: %s", leaderID)
+            cm.logger.Printf("Cluster leadership changed to: %s", leaderID)
         },
     }

@@ -71,7 +74,7 @@ func NewClusterManager(nodeID string, natsConn *nats.Conn, ctx context.Context)

 // Start begins cluster management operations
 func (cm *ClusterManager) Start() {
-    cm.logger.Printf("🚀 Starting cluster manager")
+    cm.logger.Printf("Starting cluster manager")

     // Start leader election
     cm.election.Start()
@@ -88,7 +91,7 @@ func (cm *ClusterManager) Start() {

 // Stop gracefully stops the cluster manager
 func (cm *ClusterManager) Stop() {
-    cm.logger.Printf("🛑 Stopping cluster manager")
+    cm.logger.Printf("Stopping cluster manager")

     if cm.election != nil {
         cm.election.Stop()
@@ -138,7 +141,7 @@ func (cm *ClusterManager) GetActorsInShard(shardID int) []string {
 func (cm *ClusterManager) handleClusterMessage(msg *nats.Msg) {
     var clusterMsg ClusterMessage
     if err := json.Unmarshal(msg.Data, &clusterMsg); err != nil {
-        cm.logger.Printf("⚠️ Invalid cluster message: %v", err)
+        cm.logger.Printf("Invalid cluster message: %v", err)
         return
     }

@@ -152,7 +155,7 @@ func (cm *ClusterManager) handleClusterMessage(msg *nats.Msg) {
            cm.handleNodeUpdate(update)
         }
     default:
-        cm.logger.Printf("⚠️ Unknown cluster message type: %s", clusterMsg.Type)
+        cm.logger.Printf("Unknown cluster message type: %s", clusterMsg.Type)
     }
 }

@@ -165,12 +168,12 @@ func (cm *ClusterManager) handleNodeUpdate(update NodeUpdate) {
     case NodeJoined:
         cm.nodes[update.Node.ID] = update.Node
         cm.hashRing.AddNode(update.Node.ID)
-        cm.logger.Printf("➕ Node joined: %s", update.Node.ID)
+        cm.logger.Printf("Node joined: %s", update.Node.ID)

     case NodeLeft:
         delete(cm.nodes, update.Node.ID)
         cm.hashRing.RemoveNode(update.Node.ID)
-        cm.logger.Printf("➖ Node left: %s", update.Node.ID)
+        cm.logger.Printf("Node left: %s", update.Node.ID)

     case NodeUpdated:
         if node, exists := cm.nodes[update.Node.ID]; exists {
@@ -188,7 +191,7 @@ func (cm *ClusterManager) handleNodeUpdate(update NodeUpdate) {
     for _, node := range cm.nodes {
         if now.Sub(node.LastSeen) > 90*time.Second && node.Status != NodeStatusFailed {
             node.Status = NodeStatusFailed
-            cm.logger.Printf("❌ Node marked as failed: %s (last seen: %s)",
+            cm.logger.Printf("Node marked as failed: %s (last seen: %s)",
                 node.ID, node.LastSeen.Format(time.RFC3339))
         }
     }
@@ -212,7 +215,7 @@ func (cm *ClusterManager) handleNodeUpdate(update NodeUpdate) {

 // handleRebalanceRequest processes cluster rebalancing requests
 func (cm *ClusterManager) handleRebalanceRequest(msg ClusterMessage) {
-    cm.logger.Printf("🔄 Handling rebalance request from %s", msg.From)
+    cm.logger.Printf("Handling rebalance request from %s", msg.From)

     // Implementation would handle the specific rebalancing logic
     // This is a simplified version
@@ -220,7 +223,7 @@ func (cm *ClusterManager) handleRebalanceRequest(msg ClusterMessage) {

 // handleMigrationRequest processes actor migration requests
 func (cm *ClusterManager) handleMigrationRequest(msg ClusterMessage) {
-    cm.logger.Printf("🚚 Handling migration request from %s", msg.From)
+    cm.logger.Printf("Handling migration request from %s", msg.From)

     // Implementation would handle the specific migration logic
     // This is a simplified version
@@ -232,7 +235,7 @@ func (cm *ClusterManager) triggerShardRebalancing(reason string) {
         return // Only leader can initiate rebalancing
     }

-    cm.logger.Printf("⚖️ Triggering shard rebalancing: %s", reason)
+    cm.logger.Printf("Triggering shard rebalancing: %s", reason)

     // Get active nodes
     var activeNodes []*NodeInfo
@@ -245,12 +248,12 @@ func (cm *ClusterManager) triggerShardRebalancing(reason string) {
     cm.mutex.RUnlock()

     if len(activeNodes) == 0 {
-        cm.logger.Printf("⚠️ No active nodes available for rebalancing")
+        cm.logger.Printf("No active nodes available for rebalancing")
         return
     }

     // This would implement the actual rebalancing logic
-    cm.logger.Printf("🎯 Would rebalance across %d active nodes", len(activeNodes))
+    cm.logger.Printf("Would rebalance across %d active nodes", len(activeNodes))
 }

 // monitorNodes periodically checks node health and updates
@@ -279,7 +282,7 @@ func (cm *ClusterManager) checkNodeHealth() {
     for _, node := range cm.nodes {
         if now.Sub(node.LastSeen) > 90*time.Second && node.Status == NodeStatusActive {
             node.Status = NodeStatusFailed
-            cm.logger.Printf("💔 Node failed: %s", node.ID)
+            cm.logger.Printf("Node failed: %s", node.ID)
         }
     }
 }
@@ -328,4 +331,4 @@ func (cm *ClusterManager) GetShardMap() *ShardMap {
         Nodes:      make(map[string]NodeInfo),
         UpdateTime: cm.shardMap.UpdateTime,
     }
 }
@@ -74,23 +74,23 @@ type ClusterMessage struct {

 // RebalanceRequest represents a request to rebalance shards
 type RebalanceRequest struct {
     RequestID  string           `json:"requestId"`
     FromNode   string           `json:"fromNode"`
     ToNode     string           `json:"toNode"`
     ShardIDs   []int            `json:"shardIds"`
     Reason     string           `json:"reason"`
     Migrations []ActorMigration `json:"migrations"`
 }

 // ActorMigration represents the migration of an actor between nodes
 type ActorMigration struct {
     ActorID  string                 `json:"actorId"`
     FromNode string                 `json:"fromNode"`
     ToNode   string                 `json:"toNode"`
     ShardID  int                    `json:"shardId"`
     State    map[string]interface{} `json:"state"`
     Version  int64                  `json:"version"`
     Status   string                 `json:"status"` // "pending", "in_progress", "completed", "failed"
 }

 // LeaderElectionCallbacks defines callbacks for leadership changes
@@ -108,3 +108,68 @@ type LeadershipLease struct {
     StartedAt time.Time `json:"startedAt"`
 }

+// VirtualMachine defines the interface for a virtual machine instance.
+// This interface provides the minimal contract needed by the cluster package
+// to interact with VMs without creating import cycles with the runtime package.
+type VirtualMachine interface {
+    // GetID returns the unique identifier for this VM
+    GetID() string
+    // GetActorID returns the actor ID this VM represents
+    GetActorID() string
+    // GetState returns the current state of the VM
+    GetState() VMState
+}
+
+// VMState represents the state of a virtual machine
+type VMState string
+
+const (
+    VMStateIdle    VMState = "idle"
+    VMStateRunning VMState = "running"
+    VMStatePaused  VMState = "paused"
+    VMStateStopped VMState = "stopped"
+)
+
+// RuntimeModel defines the interface for an EventStorming model that can be loaded into a runtime.
+// This decouples the cluster package from the specific eventstorming package.
+type RuntimeModel interface {
+    // GetID returns the unique identifier for this model
+    GetID() string
+    // GetName returns the name of this model
+    GetName() string
+}
+
+// RuntimeMessage defines the interface for messages that can be sent through the runtime.
+// This provides type safety for inter-actor communication without creating import cycles.
+type RuntimeMessage interface {
+    // GetTargetActorID returns the ID of the actor this message is addressed to
+    GetTargetActorID() string
+    // GetType returns the message type identifier
+    GetType() string
+}
+
+// ModelPayload is a concrete type for JSON-unmarshaling RuntimeModel payloads.
+// Use this when receiving model data over the network.
+type ModelPayload struct {
+    ID   string `json:"id"`
+    Name string `json:"name"`
+}
+
+// GetID implements RuntimeModel
+func (m *ModelPayload) GetID() string { return m.ID }
+
+// GetName implements RuntimeModel
+func (m *ModelPayload) GetName() string { return m.Name }
+
+// MessagePayload is a concrete type for JSON-unmarshaling RuntimeMessage payloads.
+// Use this when receiving message data over the network.
+type MessagePayload struct {
+    TargetActorID string `json:"targetActorId"`
+    Type          string `json:"type"`
+}
+
+// GetTargetActorID implements RuntimeMessage
+func (m *MessagePayload) GetTargetActorID() string { return m.TargetActorID }
+
+// GetType implements RuntimeMessage
+func (m *MessagePayload) GetType() string { return m.Type }
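For reference, a short illustrative snippet (not part of the change): compile-time assertions that the payload types satisfy the new interfaces, plus a made-up example of sending a message through the distributed VM.

```go
// Compile-time assertions that the concrete payload types implement the interfaces.
var (
	_ RuntimeModel   = (*ModelPayload)(nil)
	_ RuntimeMessage = (*MessagePayload)(nil)
)

// exampleSend is illustrative only; the actor ID and message type are made up.
func exampleSend(dvm *DistributedVM) error {
	msg := &MessagePayload{
		TargetActorID: "order-123",
		Type:          "PlaceOrder",
	}
	return dvm.SendMessage(msg)
}
```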