|
| 1 | +> **_NOTE:_** For now the following content has no deeper meaning.
| 2 | +> Right now this is my playground for integrating markdown into flutter. |
| 3 | +> |
| 4 | +
|
| 5 | +# AI Blog |
| 6 | + |
| 7 | +*image_caption*
| 8 | + |
| 9 | + |
| 10 | +This is inline latex: $f(x) = \sum_{i=0}^{n} \frac{a_i}{1+x}$ |
| 11 | +This is block-level latex (see the fenced math block below):
| 12 | + |
| 13 | +This is inline latex with displayMode: $$f(x) = \sum_{i=0}^{n} \frac{a_i}{1+x}$$ |
| 14 | + |
| 15 | +**The Cauchy-Schwarz Inequality** |
| 16 | + |
| 17 | +```math |
| 18 | + \left( \sum_{k=1}^n a_k b_k \right)^2 \leq \left( \sum_{k=1}^n a_k^2 \right) \left( \sum_{k=1}^n b_k^2 \right) |
| 19 | +``` |
| 20 | + |
| 21 | + |
| 22 | +```c |
| 23 | +/* SPDX-License-Identifier: GPL-2.0 */ |
| 24 | +#ifndef __CGROUP_INTERNAL_H |
| 25 | +#define __CGROUP_INTERNAL_H |
| 26 | + |
| 27 | +#include <linux/cgroup.h> |
| 28 | +#include <linux/kernfs.h> |
| 29 | + |
| 30 | +#define TRACE_CGROUP_PATH_LEN 1024 |
| 31 | + |
| 32 | + |
| 33 | +/* |
| 34 | + * cgroup_path() takes a spin lock. It is good practice not to take |
| 35 | + * spin locks within trace point handlers, as they are mostly hidden |
| 36 | + * from normal view. As cgroup_path() can take the kernfs_rename_lock |
| 37 | + * spin lock, it is best to not call that function from the trace event |
| 38 | + * handler. |
| 39 | + * |
| 40 | + * Note: trace_cgroup_##type##_enabled() is a static branch that will only |
| 41 | + * be set when the trace event is enabled. |
| 42 | + */ |
| 43 | +#define TRACE_CGROUP_PATH(type, cgrp, ...) \ |
| 44 | + do { \ |
| 45 | + if (trace_cgroup_##type##_enabled()) { \ |
| 46 | + unsigned long flags; \ |
| 47 | + spin_lock_irqsave(&trace_cgroup_path_lock, \ |
| 48 | + flags); \ |
| 49 | + cgroup_path(cgrp, trace_cgroup_path, \ |
| 50 | + TRACE_CGROUP_PATH_LEN); \ |
| 51 | + trace_cgroup_##type(cgrp, trace_cgroup_path, \ |
| 52 | + ##__VA_ARGS__); \ |
| 53 | + spin_unlock_irqrestore(&trace_cgroup_path_lock, \ |
| 54 | + flags); \ |
| 55 | + } \ |
| 56 | + } while (0) |
| 57 | + |
| 58 | +/* |
| 59 | + * The cgroup filesystem superblock creation/mount context. |
| 60 | + */ |
| 61 | +struct cgroup_fs_context { |
| 62 | + struct kernfs_fs_context kfc; |
| 63 | + struct cgroup_root *root; |
| 64 | + struct cgroup_namespace *ns; |
| 65 | + unsigned int flags; /* CGRP_ROOT_* flags */ |
| 66 | + |
| 67 | + /* cgroup1 bits */ |
| 68 | + bool cpuset_clone_children; |
| 69 | + bool none; /* User explicitly requested empty subsystem */ |
| 70 | + bool all_ss; /* Seen 'all' option */ |
| 71 | + u16 subsys_mask; /* Selected subsystems */ |
| 72 | + char *name; /* Hierarchy name */ |
| 73 | + char *release_agent; /* Path for release notifications */ |
| 74 | +}; |
| 75 | + |
| 76 | +static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc) |
| 77 | +{ |
| 78 | + struct kernfs_fs_context *kfc = fc->fs_private; |
| 79 | + |
| 80 | + return container_of(kfc, struct cgroup_fs_context, kfc); |
| 81 | +} |
| 82 | + |
| 83 | +struct cgroup_pidlist; |
| 84 | + |
| 85 | +struct cgroup_file_ctx { |
| 86 | + struct cgroup_namespace *ns; |
| 87 | + |
| 88 | + struct { |
| 89 | + void *trigger; |
| 90 | + } psi; |
| 91 | + |
| 92 | + struct { |
| 93 | + bool started; |
| 94 | + struct css_task_iter iter; |
| 95 | + } procs; |
| 96 | + |
| 97 | + struct { |
| 98 | + struct cgroup_pidlist *pidlist; |
| 99 | + } procs1; |
| 100 | +}; |
| 101 | + |
| 102 | +/* |
| 103 | + * A cgroup can be associated with multiple css_sets as different tasks may |
| 104 | + * belong to different cgroups on different hierarchies. In the other |
| 105 | + * direction, a css_set is naturally associated with multiple cgroups. |
| 106 | + * This M:N relationship is represented by the following link structure |
| 107 | + * which exists for each association and allows traversing the associations |
| 108 | + * from both sides. |
| 109 | + */ |
| 110 | +struct cgrp_cset_link { |
| 111 | + /* the cgroup and css_set this link associates */ |
| 112 | + struct cgroup *cgrp; |
| 113 | + struct css_set *cset; |
| 114 | + |
| 115 | + /* list of cgrp_cset_links anchored at cgrp->cset_links */ |
| 116 | + struct list_head cset_link; |
| 117 | + |
| 118 | + /* list of cgrp_cset_links anchored at css_set->cgrp_links */ |
| 119 | + struct list_head cgrp_link; |
| 120 | +}; |
| 121 | + |
| 122 | +/* used to track tasks and csets during migration */ |
| 123 | +struct cgroup_taskset { |
| 124 | + /* the src and dst cset list running through cset->mg_node */ |
| 125 | + struct list_head src_csets; |
| 126 | + struct list_head dst_csets; |
| 127 | + |
| 128 | + /* the number of tasks in the set */ |
| 129 | + int nr_tasks; |
| 130 | + |
| 131 | + /* the subsys currently being processed */ |
| 132 | + int ssid; |
| 133 | + |
| 134 | + /* |
| 135 | + * Fields for cgroup_taskset_*() iteration. |
| 136 | + * |
| 137 | + * Before migration is committed, the target migration tasks are on |
| 138 | + * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of |
| 139 | + * the csets on ->dst_csets. ->csets point to either ->src_csets |
| 140 | + * or ->dst_csets depending on whether migration is committed. |
| 141 | + * |
| 142 | + * ->cur_csets and ->cur_task point to the current task position |
| 143 | + * during iteration. |
| 144 | + */ |
| 145 | + struct list_head *csets; |
| 146 | + struct css_set *cur_cset; |
| 147 | + struct task_struct *cur_task; |
| 148 | +}; |
| 149 | + |
| 150 | +/* migration context also tracks preloading */ |
| 151 | +struct cgroup_mgctx { |
| 152 | + /* |
| 153 | + * Preloaded source and destination csets. Used to guarantee |
| 154 | + * atomic success or failure on actual migration. |
| 155 | + */ |
| 156 | + struct list_head preloaded_src_csets; |
| 157 | + struct list_head preloaded_dst_csets; |
| 158 | + |
| 159 | + /* tasks and csets to migrate */ |
| 160 | + struct cgroup_taskset tset; |
| 161 | + |
| 162 | + /* subsystems affected by migration */ |
| 163 | + u16 ss_mask; |
| 164 | +}; |
| 165 | + |
| 166 | +#define CGROUP_TASKSET_INIT(tset) \ |
| 167 | +{ \ |
| 168 | + .src_csets = LIST_HEAD_INIT(tset.src_csets), \ |
| 169 | + .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ |
| 170 | + .csets = &tset.src_csets, \ |
| 171 | +} |
| 172 | + |
| 173 | +#define CGROUP_MGCTX_INIT(name) \ |
| 174 | +{ \ |
| 175 | + LIST_HEAD_INIT(name.preloaded_src_csets), \ |
| 176 | + LIST_HEAD_INIT(name.preloaded_dst_csets), \ |
| 177 | + CGROUP_TASKSET_INIT(name.tset), \ |
| 178 | +} |
| 179 | + |
| 180 | +#define DEFINE_CGROUP_MGCTX(name) \ |
| 181 | + struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name) |
| 182 | + |
| 183 | +extern struct cgroup_subsys *cgroup_subsys[]; |
| 184 | +extern struct list_head cgroup_roots; |
| 185 | + |
| 186 | +/* iterate across the hierarchies */ |
| 187 | +#define for_each_root(root) \ |
| 188 | + list_for_each_entry_rcu((root), &cgroup_roots, root_list, \ |
| 189 | + lockdep_is_held(&cgroup_mutex)) |
| 190 | + |
| 191 | +/** |
| 192 | + * for_each_subsys - iterate all enabled cgroup subsystems |
| 193 | + * @ss: the iteration cursor |
| 194 | + * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
| 195 | + */ |
| 196 | +#define for_each_subsys(ss, ssid) \ |
| 197 | + for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ |
| 198 | + (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) |
| 199 | + |
| 200 | +static inline bool cgroup_is_dead(const struct cgroup *cgrp) |
| 201 | +{ |
| 202 | + return !(cgrp->self.flags & CSS_ONLINE); |
| 203 | +} |
| 204 | + |
| 205 | +static inline bool notify_on_release(const struct cgroup *cgrp) |
| 206 | +{ |
| 207 | + return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); |
| 208 | +} |
| 209 | + |
| 210 | +void put_css_set_locked(struct css_set *cset); |
| 211 | + |
| 212 | +static inline void put_css_set(struct css_set *cset) |
| 213 | +{ |
| 214 | + unsigned long flags; |
| 215 | + |
| 216 | + /* |
| 217 | + * Ensure that the refcount doesn't hit zero while any readers |
| 218 | + * can see it. Similar to atomic_dec_and_lock(), but for an |
| 219 | + * rwlock |
| 220 | + */ |
| 221 | + if (refcount_dec_not_one(&cset->refcount)) |
| 222 | + return; |
| 223 | + |
| 224 | + spin_lock_irqsave(&css_set_lock, flags); |
| 225 | + put_css_set_locked(cset); |
| 226 | + spin_unlock_irqrestore(&css_set_lock, flags); |
| 227 | +} |
| 228 | + |
| 229 | +/* |
| 230 | + * refcounted get/put for css_set objects |
| 231 | + */ |
| 232 | +static inline void get_css_set(struct css_set *cset) |
| 233 | +{ |
| 234 | + refcount_inc(&cset->refcount); |
| 235 | +} |
| 236 | +#endif /* __CGROUP_INTERNAL_H */ |
| 237 | +``` |
| 238 | +| Command | Description |
| 239 | +| ------------ | ---------------------------------------------- |
| 240 | +| `git status` | List all new or modified files |
| 241 | +| `git diff` | Show file differences that haven't been staged |
| 242 | + |
| 243 | +> **_NOTE:_** Das Impressum befindet sich in einer experimentellen Phase und |
| 244 | +> beinhaltet derzeit unvollständige/dummy Daten.
| 245 | + |
| 246 | +## Anbieter |
| 247 | +Jonas Heinle |
| 248 | +Elchwinkel 42 |
| 249 | +12345 Bärstadt |
| 250 | +<!-- Telefon: 030 18 305-0 --> |
| 251 | + |
| 252 | +## Links |
| 253 | + |
| 254 | +### E-Mail |
| 255 | +- für allgemeine Anfragen: [Kontakt zu mir](#) |
| 256 | + |
| 257 | +## Externe Links |
| 258 | +- [philoclopedia.de](#) |
| 259 | +- [johannes-heinle.de](#) |
| 260 | + |
| 261 | + |
| 262 | +Stand: 13.02.2024 |
0 commit comments