-
Notifications
You must be signed in to change notification settings - Fork 82
add device data in resourceclaim #128
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,12 +17,17 @@ | |
| package main | ||
|
|
||
| import ( | ||
| "context" | ||
| "encoding/json" | ||
| "fmt" | ||
| "slices" | ||
| "sync" | ||
|
|
||
| resourceapi "k8s.io/api/resource/v1" | ||
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
| "k8s.io/apimachinery/pkg/runtime" | ||
| resourceapply "k8s.io/client-go/applyconfigurations/resource/v1" | ||
| "k8s.io/klog/v2" | ||
| drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1beta1" | ||
| "k8s.io/kubernetes/pkg/kubelet/checkpointmanager" | ||
|
|
||
|
|
@@ -61,6 +66,7 @@ type DeviceState struct { | |
| cdi *CDIHandler | ||
| allocatable AllocatableDevices | ||
| checkpointManager checkpointmanager.CheckpointManager | ||
| config *Config | ||
| } | ||
|
|
||
| func NewDeviceState(config *Config) (*DeviceState, error) { | ||
|
|
@@ -88,6 +94,7 @@ func NewDeviceState(config *Config) (*DeviceState, error) { | |
| cdi: cdi, | ||
| allocatable: allocatable, | ||
| checkpointManager: checkpointManager, | ||
| config: config, | ||
| } | ||
|
|
||
| checkpoints, err := state.checkpointManager.ListCheckpoints() | ||
|
|
@@ -109,7 +116,7 @@ func NewDeviceState(config *Config) (*DeviceState, error) { | |
| return state, nil | ||
| } | ||
|
|
||
| func (s *DeviceState) Prepare(claim *resourceapi.ResourceClaim) ([]*drapbv1.Device, error) { | ||
| func (s *DeviceState) Prepare(ctx context.Context, claim *resourceapi.ResourceClaim) ([]*drapbv1.Device, error) { | ||
| s.Lock() | ||
| defer s.Unlock() | ||
|
|
||
|
|
@@ -125,7 +132,7 @@ func (s *DeviceState) Prepare(claim *resourceapi.ResourceClaim) ([]*drapbv1.Devi | |
| return preparedClaims[claimUID].GetDevices(), nil | ||
| } | ||
|
|
||
| preparedDevices, err := s.prepareDevices(claim) | ||
| preparedDevices, err := s.prepareDevices(ctx, claim) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("prepare failed: %v", err) | ||
| } | ||
|
|
@@ -173,7 +180,7 @@ func (s *DeviceState) Unprepare(claimUID string) error { | |
| return nil | ||
| } | ||
|
|
||
| func (s *DeviceState) prepareDevices(claim *resourceapi.ResourceClaim) (PreparedDevices, error) { | ||
| func (s *DeviceState) prepareDevices(ctx context.Context, claim *resourceapi.ResourceClaim) (PreparedDevices, error) { | ||
| if claim.Status.Allocation == nil { | ||
| return nil, fmt.Errorf("claim not yet allocated") | ||
| } | ||
|
|
@@ -196,13 +203,20 @@ func (s *DeviceState) prepareDevices(claim *resourceapi.ResourceClaim) (Prepared | |
| Config: configapi.DefaultGpuConfig(), | ||
| }) | ||
|
|
||
| // build device status | ||
| var devicesStatus []*resourceapply.AllocatedDeviceStatusApplyConfiguration | ||
|
|
||
| // Look through the configs and figure out which one will be applied to | ||
| // each device allocation result based on their order of precedence. | ||
| configResultsMap := make(map[runtime.Object][]*resourceapi.DeviceRequestAllocationResult) | ||
| for _, result := range claim.Status.Allocation.Devices.Results { | ||
| if _, exists := s.allocatable[result.Device]; !exists { | ||
| return nil, fmt.Errorf("requested GPU is not allocatable: %v", result.Device) | ||
| } | ||
|
|
||
| deviceStatus := s.buildDeviceStatus(result) | ||
| devicesStatus = append(devicesStatus, deviceStatus) | ||
|
|
||
| for _, c := range slices.Backward(configs) { | ||
| if len(c.Requests) == 0 || slices.Contains(c.Requests, result.Request) { | ||
| configResultsMap[c.Config] = append(configResultsMap[c.Config], &result) | ||
|
|
@@ -211,6 +225,11 @@ func (s *DeviceState) prepareDevices(claim *resourceapi.ResourceClaim) (Prepared | |
| } | ||
| } | ||
|
|
||
| klog.Infof("Adding device attribute to claim %s/%s", claim.Namespace, claim.Name) | ||
| if err := s.applyDeviceStatus(ctx, claim.Namespace, claim.Name, devicesStatus...); err != nil { | ||
| klog.Warningf("Failed to update device attributes for claim %s/%s: %v", claim.Namespace, claim.Name, err) | ||
| } | ||
|
|
||
| // Normalize, validate, and apply all configs associated with devices that | ||
| // need to be prepared. Track container edits generated from applying the | ||
| // config to the set of device allocation results. | ||
|
|
@@ -380,3 +399,52 @@ func GetOpaqueDeviceConfigs( | |
|
|
||
| return resultConfigs, nil | ||
| } | ||
|
|
||
| func (s *DeviceState) buildDeviceStatus(res resourceapi.DeviceRequestAllocationResult) *resourceapply.AllocatedDeviceStatusApplyConfiguration { | ||
| dn := res.Device | ||
| deviceInfo := make(map[string]resourceapi.DeviceAttribute) | ||
|
|
||
| if d, ok := s.allocatable[dn]; ok { | ||
| if uuid, ok := d.Attributes["uuid"]; ok { | ||
| deviceInfo["uuid"] = uuid | ||
| } | ||
| if model, ok := d.Attributes["model"]; ok { | ||
| deviceInfo["model"] = model | ||
| } | ||
| if driverVersion, ok := d.Attributes["driverVersion"]; ok { | ||
| deviceInfo["driverVersion"] = driverVersion | ||
| } | ||
| } | ||
|
|
||
| jsonBytes, err := json.Marshal(deviceInfo) | ||
| if err != nil { | ||
| klog.Errorf("Failed to marshal device data: %v", err) | ||
| jsonBytes = []byte("{}") | ||
| } | ||
| data := runtime.RawExtension{ | ||
| Raw: jsonBytes, | ||
| } | ||
|
|
||
| return resourceapply.AllocatedDeviceStatus(). | ||
| WithDevice(dn). | ||
| WithDriver(res.Driver). | ||
| WithPool(res.Pool). | ||
| // WithData records per-allocation metadata used for monitoring and debugging: | ||
| // - Pod→GPU mapping: makes it easier to see which GPU a given pod is using, | ||
| // which is not readily available elsewhere. | ||
| // - Device attributes (e.g. UUID, model, driverVersion): remain available | ||
| // even if the device is later removed from a ResourceSlice (for example, | ||
| // because it becomes unhealthy), so past allocations can still be | ||
| // correlated with later health or scheduling issues. | ||
| WithData(data) | ||
| } | ||
|
|
||
| func (s *DeviceState) applyDeviceStatus(ctx context.Context, ns, name string, devices ...*resourceapply.AllocatedDeviceStatusApplyConfiguration) error { | ||
| claim := resourceapply.ResourceClaim(name, ns). | ||
| WithStatus(resourceapply.ResourceClaimStatus().WithDevices(devices...)) | ||
|
|
||
| opts := metav1.ApplyOptions{FieldManager: consts.DriverName, Force: true} | ||
|
|
||
| _, err := s.config.coreclient.ResourceV1().ResourceClaims(ns).ApplyStatus(ctx, claim, opts) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is there some lightweight verification we can add to the e2e tests? At least something to verify that something like the condition or one of the attributes is set on even one of the examples would make sure this doesn't totally break later.
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Sure.
||
| return err | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
For my own understanding, who or what generally consumes this data?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To me, this seems useful for monitoring and debugging when an admin wants to know the pod-to-GPU mapping — that is, which GPU is being used by a particular pod. Right now this information is not readily available.