Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/hypfs: factor out filesystem code

The s390_hypfs filesystem is deprecated and shouldn't be used due to its
rather odd semantics. It creates a whole directory structure with static
file contents so a user can read a consistent state while within that
directory.
Writing to its update attribute will remove and rebuild nearly the whole
filesystem, so that again a user can read a consistent state, even if
multiple files need to be read.

Given that this wastes a lot of CPU cycles, and involves a lot of code,
binary interfaces were added quite a few years ago, which simply pass the
binary data to user space, and let user space decode the data.
This is the preferred and only way the data should be retrieved.

The assumption is that there are no users of the s390_hypfs filesystem.
However, instead of just removing the code, and having to revert in case
there are actually users, factor the filesystem code out and make it only
available via a new config option.

This config option is supposed to be disabled. If it turns out that there
are no complaints, the filesystem code can probably be removed in a couple
of years.

Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

+709 -587
+1 -1
arch/s390/Kbuild
··· 3 3 obj-y += mm/ 4 4 obj-$(CONFIG_KVM) += kvm/ 5 5 obj-y += crypto/ 6 - obj-$(CONFIG_S390_HYPFS_FS) += hypfs/ 6 + obj-$(CONFIG_S390_HYPFS) += hypfs/ 7 7 obj-$(CONFIG_APPLDATA_BASE) += appldata/ 8 8 obj-y += net/ 9 9 obj-$(CONFIG_PCI) += pci/
+13 -2
arch/s390/Kconfig
··· 877 877 This can also be compiled as a module, which will be called 878 878 appldata_net_sum.o. 879 879 880 - config S390_HYPFS_FS 880 + config S390_HYPFS 881 881 def_bool y 882 + prompt "s390 hypervisor information" 883 + help 884 + This provides several binary files at (debugfs)/s390_hypfs/ to 885 + provide accounting information in an s390 hypervisor environment. 886 + 887 + config S390_HYPFS_FS 888 + def_bool n 882 889 prompt "s390 hypervisor file system support" 883 890 select SYS_HYPERVISOR 891 + depends on S390_HYPFS 884 892 help 885 893 This is a virtual file system intended to provide accounting 886 - information in an s390 hypervisor environment. 894 + information in an s390 hypervisor environment. This file system 895 + is deprecated and should not be used. 896 + 897 + Say N if you are unsure. 887 898 888 899 source "arch/s390/kvm/Kconfig" 889 900
+8 -3
arch/s390/hypfs/Makefile
··· 3 3 # Makefile for the linux hypfs filesystem routines. 4 4 # 5 5 6 - obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o 6 + obj-$(CONFIG_S390_HYPFS) += hypfs_dbfs.o 7 + obj-$(CONFIG_S390_HYPFS) += hypfs_diag.o 8 + obj-$(CONFIG_S390_HYPFS) += hypfs_diag0c.o 9 + obj-$(CONFIG_S390_HYPFS) += hypfs_sprp.o 10 + obj-$(CONFIG_S390_HYPFS) += hypfs_vm.o 7 11 8 - s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o 9 - s390_hypfs-objs += hypfs_diag0c.o 12 + obj-$(CONFIG_S390_HYPFS_FS) += hypfs_diag_fs.o 13 + obj-$(CONFIG_S390_HYPFS_FS) += hypfs_vm_fs.o 14 + obj-$(CONFIG_S390_HYPFS_FS) += inode.o
+9 -1
arch/s390/hypfs/hypfs.h
··· 46 46 void hypfs_sprp_init(void); 47 47 void hypfs_sprp_exit(void); 48 48 49 + int __hypfs_fs_init(void); 50 + 51 + static inline int hypfs_fs_init(void) 52 + { 53 + if (IS_ENABLED(CONFIG_S390_HYPFS_FS)) 54 + return __hypfs_fs_init(); 55 + return 0; 56 + } 57 + 49 58 /* debugfs interface */ 50 59 struct hypfs_dbfs_file; 51 60 ··· 78 69 struct dentry *dentry; 79 70 }; 80 71 81 - extern void hypfs_dbfs_init(void); 82 72 extern void hypfs_dbfs_exit(void); 83 73 extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); 84 74 extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
+26 -5
arch/s390/hypfs/hypfs_dbfs.c
··· 90 90 debugfs_remove(df->dentry); 91 91 } 92 92 93 - void hypfs_dbfs_init(void) 93 + static int __init hypfs_dbfs_init(void) 94 94 { 95 - dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); 96 - } 95 + int rc = -ENODATA; 97 96 98 - void hypfs_dbfs_exit(void) 99 - { 97 + dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); 98 + if (hypfs_diag_init()) 99 + goto fail_dbfs_exit; 100 + if (hypfs_vm_init()) 101 + goto fail_hypfs_diag_exit; 102 + hypfs_sprp_init(); 103 + if (hypfs_diag0c_init()) 104 + goto fail_hypfs_sprp_exit; 105 + rc = hypfs_fs_init(); 106 + if (rc) 107 + goto fail_hypfs_diag0c_exit; 108 + return 0; 109 + 110 + fail_hypfs_diag0c_exit: 111 + hypfs_diag0c_exit(); 112 + fail_hypfs_sprp_exit: 113 + hypfs_sprp_exit(); 114 + hypfs_vm_exit(); 115 + fail_hypfs_diag_exit: 116 + hypfs_diag_exit(); 117 + pr_err("Initialization of hypfs failed with rc=%i\n", rc); 118 + fail_dbfs_exit: 100 119 debugfs_remove(dbfs_dir); 120 + return rc; 101 121 } 122 + device_initcall(hypfs_dbfs_init)
+20 -380
arch/s390/hypfs/hypfs_diag.c
··· 18 18 #include <linux/mm.h> 19 19 #include <asm/diag.h> 20 20 #include <asm/ebcdic.h> 21 + #include "hypfs_diag.h" 21 22 #include "hypfs.h" 22 - 23 - #define TMP_SIZE 64 /* size of temporary buffers */ 24 23 25 24 #define DBFS_D204_HDR_VERSION 0 26 25 27 - static char *diag224_cpu_names; /* diag 224 name table */ 28 26 static enum diag204_sc diag204_store_sc; /* used subcode for store */ 29 27 static enum diag204_format diag204_info_type; /* used diag 204 data format */ 30 28 ··· 31 33 32 34 static struct dentry *dbfs_d204_file; 33 35 34 - /* 35 - * DIAG 204 member access functions. 36 - * 37 - * Since we have two different diag 204 data formats for old and new s390 38 - * machines, we do not access the structs directly, but use getter functions for 39 - * each struct member instead. This should make the code more readable. 40 - */ 41 - 42 - /* Time information block */ 43 - 44 - static inline int info_blk_hdr__size(enum diag204_format type) 36 + enum diag204_format diag204_get_info_type(void) 45 37 { 46 - if (type == DIAG204_INFO_SIMPLE) 47 - return sizeof(struct diag204_info_blk_hdr); 48 - else /* DIAG204_INFO_EXT */ 49 - return sizeof(struct diag204_x_info_blk_hdr); 38 + return diag204_info_type; 50 39 } 51 40 52 - static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr) 41 + static void diag204_set_info_type(enum diag204_format type) 53 42 { 54 - if (type == DIAG204_INFO_SIMPLE) 55 - return ((struct diag204_info_blk_hdr *)hdr)->npar; 56 - else /* DIAG204_INFO_EXT */ 57 - return ((struct diag204_x_info_blk_hdr *)hdr)->npar; 58 - } 59 - 60 - static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr) 61 - { 62 - if (type == DIAG204_INFO_SIMPLE) 63 - return ((struct diag204_info_blk_hdr *)hdr)->flags; 64 - else /* DIAG204_INFO_EXT */ 65 - return ((struct diag204_x_info_blk_hdr *)hdr)->flags; 66 - } 67 - 68 - /* Partition header */ 69 - 70 - static inline int part_hdr__size(enum diag204_format type) 71 - { 72 - if (type == 
DIAG204_INFO_SIMPLE) 73 - return sizeof(struct diag204_part_hdr); 74 - else /* DIAG204_INFO_EXT */ 75 - return sizeof(struct diag204_x_part_hdr); 76 - } 77 - 78 - static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr) 79 - { 80 - if (type == DIAG204_INFO_SIMPLE) 81 - return ((struct diag204_part_hdr *)hdr)->cpus; 82 - else /* DIAG204_INFO_EXT */ 83 - return ((struct diag204_x_part_hdr *)hdr)->rcpus; 84 - } 85 - 86 - static inline void part_hdr__part_name(enum diag204_format type, void *hdr, 87 - char *name) 88 - { 89 - if (type == DIAG204_INFO_SIMPLE) 90 - memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name, 91 - DIAG204_LPAR_NAME_LEN); 92 - else /* DIAG204_INFO_EXT */ 93 - memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name, 94 - DIAG204_LPAR_NAME_LEN); 95 - EBCASC(name, DIAG204_LPAR_NAME_LEN); 96 - name[DIAG204_LPAR_NAME_LEN] = 0; 97 - strim(name); 98 - } 99 - 100 - /* CPU info block */ 101 - 102 - static inline int cpu_info__size(enum diag204_format type) 103 - { 104 - if (type == DIAG204_INFO_SIMPLE) 105 - return sizeof(struct diag204_cpu_info); 106 - else /* DIAG204_INFO_EXT */ 107 - return sizeof(struct diag204_x_cpu_info); 108 - } 109 - 110 - static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr) 111 - { 112 - if (type == DIAG204_INFO_SIMPLE) 113 - return ((struct diag204_cpu_info *)hdr)->ctidx; 114 - else /* DIAG204_INFO_EXT */ 115 - return ((struct diag204_x_cpu_info *)hdr)->ctidx; 116 - } 117 - 118 - static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr) 119 - { 120 - if (type == DIAG204_INFO_SIMPLE) 121 - return ((struct diag204_cpu_info *)hdr)->cpu_addr; 122 - else /* DIAG204_INFO_EXT */ 123 - return ((struct diag204_x_cpu_info *)hdr)->cpu_addr; 124 - } 125 - 126 - static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr) 127 - { 128 - if (type == DIAG204_INFO_SIMPLE) 129 - return ((struct diag204_cpu_info *)hdr)->acc_time; 130 - else /* DIAG204_INFO_EXT */ 131 - return 
((struct diag204_x_cpu_info *)hdr)->acc_time; 132 - } 133 - 134 - static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr) 135 - { 136 - if (type == DIAG204_INFO_SIMPLE) 137 - return ((struct diag204_cpu_info *)hdr)->lp_time; 138 - else /* DIAG204_INFO_EXT */ 139 - return ((struct diag204_x_cpu_info *)hdr)->lp_time; 140 - } 141 - 142 - static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr) 143 - { 144 - if (type == DIAG204_INFO_SIMPLE) 145 - return 0; /* online_time not available in simple info */ 146 - else /* DIAG204_INFO_EXT */ 147 - return ((struct diag204_x_cpu_info *)hdr)->online_time; 148 - } 149 - 150 - /* Physical header */ 151 - 152 - static inline int phys_hdr__size(enum diag204_format type) 153 - { 154 - if (type == DIAG204_INFO_SIMPLE) 155 - return sizeof(struct diag204_phys_hdr); 156 - else /* DIAG204_INFO_EXT */ 157 - return sizeof(struct diag204_x_phys_hdr); 158 - } 159 - 160 - static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr) 161 - { 162 - if (type == DIAG204_INFO_SIMPLE) 163 - return ((struct diag204_phys_hdr *)hdr)->cpus; 164 - else /* DIAG204_INFO_EXT */ 165 - return ((struct diag204_x_phys_hdr *)hdr)->cpus; 166 - } 167 - 168 - /* Physical CPU info block */ 169 - 170 - static inline int phys_cpu__size(enum diag204_format type) 171 - { 172 - if (type == DIAG204_INFO_SIMPLE) 173 - return sizeof(struct diag204_phys_cpu); 174 - else /* DIAG204_INFO_EXT */ 175 - return sizeof(struct diag204_x_phys_cpu); 176 - } 177 - 178 - static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr) 179 - { 180 - if (type == DIAG204_INFO_SIMPLE) 181 - return ((struct diag204_phys_cpu *)hdr)->cpu_addr; 182 - else /* DIAG204_INFO_EXT */ 183 - return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr; 184 - } 185 - 186 - static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr) 187 - { 188 - if (type == DIAG204_INFO_SIMPLE) 189 - return ((struct diag204_phys_cpu *)hdr)->mgm_time; 
190 - else /* DIAG204_INFO_EXT */ 191 - return ((struct diag204_x_phys_cpu *)hdr)->mgm_time; 192 - } 193 - 194 - static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr) 195 - { 196 - if (type == DIAG204_INFO_SIMPLE) 197 - return ((struct diag204_phys_cpu *)hdr)->ctidx; 198 - else /* DIAG204_INFO_EXT */ 199 - return ((struct diag204_x_phys_cpu *)hdr)->ctidx; 43 + diag204_info_type = type; 200 44 } 201 45 202 46 /* Diagnose 204 functions */ ··· 55 215 diag204_buf = NULL; 56 216 } 57 217 58 - static void *diag204_get_buffer(enum diag204_format fmt, int *pages) 218 + void *diag204_get_buffer(enum diag204_format fmt, int *pages) 59 219 { 60 220 if (diag204_buf) { 61 221 *pages = diag204_buf_pages; ··· 102 262 if (diag204((unsigned long)DIAG204_SUBC_STIB7 | 103 263 (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) { 104 264 diag204_store_sc = DIAG204_SUBC_STIB7; 105 - diag204_info_type = DIAG204_INFO_EXT; 265 + diag204_set_info_type(DIAG204_INFO_EXT); 106 266 goto out; 107 267 } 108 268 if (diag204((unsigned long)DIAG204_SUBC_STIB6 | 109 269 (unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) { 110 270 diag204_store_sc = DIAG204_SUBC_STIB6; 111 - diag204_info_type = DIAG204_INFO_EXT; 271 + diag204_set_info_type(DIAG204_INFO_EXT); 112 272 goto out; 113 273 } 114 274 diag204_free_buffer(); ··· 124 284 if (diag204((unsigned long)DIAG204_SUBC_STIB4 | 125 285 (unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) { 126 286 diag204_store_sc = DIAG204_SUBC_STIB4; 127 - diag204_info_type = DIAG204_INFO_SIMPLE; 287 + diag204_set_info_type(DIAG204_INFO_SIMPLE); 128 288 goto out; 129 289 } else { 130 290 rc = -ENOSYS; ··· 138 298 return rc; 139 299 } 140 300 141 - static int diag204_do_store(void *buf, int pages) 301 + int diag204_store(void *buf, int pages) 142 302 { 143 303 int rc; 144 304 145 - rc = diag204((unsigned long) diag204_store_sc | 146 - (unsigned long) diag204_info_type, pages, buf); 305 + rc = diag204((unsigned long)diag204_store_sc | 306 + (unsigned 
long)diag204_get_info_type(), pages, buf); 147 307 return rc < 0 ? -ENOSYS : 0; 148 - } 149 - 150 - static void *diag204_store(void) 151 - { 152 - void *buf; 153 - int pages, rc; 154 - 155 - buf = diag204_get_buffer(diag204_info_type, &pages); 156 - if (IS_ERR(buf)) 157 - goto out; 158 - rc = diag204_do_store(buf, pages); 159 - if (rc) 160 - return ERR_PTR(rc); 161 - out: 162 - return buf; 163 - } 164 - 165 - /* Diagnose 224 functions */ 166 - 167 - static int diag224_get_name_table(void) 168 - { 169 - /* memory must be below 2GB */ 170 - diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA); 171 - if (!diag224_cpu_names) 172 - return -ENOMEM; 173 - if (diag224(diag224_cpu_names)) { 174 - free_page((unsigned long) diag224_cpu_names); 175 - return -EOPNOTSUPP; 176 - } 177 - EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); 178 - return 0; 179 - } 180 - 181 - static void diag224_delete_name_table(void) 182 - { 183 - free_page((unsigned long) diag224_cpu_names); 184 - } 185 - 186 - static int diag224_idx2name(int index, char *name) 187 - { 188 - memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN), 189 - DIAG204_CPU_NAME_LEN); 190 - name[DIAG204_CPU_NAME_LEN] = 0; 191 - strim(name); 192 - return 0; 193 308 } 194 309 195 310 struct dbfs_d204_hdr { ··· 170 375 if (!base) 171 376 return -ENOMEM; 172 377 d204 = PTR_ALIGN(base + sizeof(d204->hdr), PAGE_SIZE) - sizeof(d204->hdr); 173 - rc = diag204_do_store(d204->buf, diag204_buf_pages); 378 + rc = diag204_store(d204->buf, diag204_buf_pages); 174 379 if (rc) { 175 380 vfree(base); 176 381 return rc; ··· 199 404 return -ENODATA; 200 405 } 201 406 202 - if (diag204_info_type == DIAG204_INFO_EXT) 407 + if (diag204_get_info_type() == DIAG204_INFO_EXT) 203 408 hypfs_dbfs_create_file(&dbfs_file_d204); 204 409 205 - if (MACHINE_IS_LPAR) { 206 - rc = diag224_get_name_table(); 207 - if (rc) { 208 - pr_err("The hardware system does not provide all " 209 - "functions required by hypfs\n"); 
210 - debugfs_remove(dbfs_d204_file); 211 - return rc; 212 - } 410 + rc = hypfs_diag_fs_init(); 411 + if (rc) { 412 + pr_err("The hardware system does not provide all functions required by hypfs\n"); 413 + debugfs_remove(dbfs_d204_file); 213 414 } 214 - return 0; 415 + return rc; 215 416 } 216 417 217 418 void hypfs_diag_exit(void) 218 419 { 219 420 debugfs_remove(dbfs_d204_file); 220 - diag224_delete_name_table(); 421 + hypfs_diag_fs_exit(); 221 422 diag204_free_buffer(); 222 423 hypfs_dbfs_remove_file(&dbfs_file_d204); 223 - } 224 - 225 - /* 226 - * Functions to create the directory structure 227 - * ******************************************* 228 - */ 229 - 230 - static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info) 231 - { 232 - struct dentry *cpu_dir; 233 - char buffer[TMP_SIZE]; 234 - void *rc; 235 - 236 - snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type, 237 - cpu_info)); 238 - cpu_dir = hypfs_mkdir(cpus_dir, buffer); 239 - rc = hypfs_create_u64(cpu_dir, "mgmtime", 240 - cpu_info__acc_time(diag204_info_type, cpu_info) - 241 - cpu_info__lp_time(diag204_info_type, cpu_info)); 242 - if (IS_ERR(rc)) 243 - return PTR_ERR(rc); 244 - rc = hypfs_create_u64(cpu_dir, "cputime", 245 - cpu_info__lp_time(diag204_info_type, cpu_info)); 246 - if (IS_ERR(rc)) 247 - return PTR_ERR(rc); 248 - if (diag204_info_type == DIAG204_INFO_EXT) { 249 - rc = hypfs_create_u64(cpu_dir, "onlinetime", 250 - cpu_info__online_time(diag204_info_type, 251 - cpu_info)); 252 - if (IS_ERR(rc)) 253 - return PTR_ERR(rc); 254 - } 255 - diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer); 256 - rc = hypfs_create_str(cpu_dir, "type", buffer); 257 - return PTR_ERR_OR_ZERO(rc); 258 - } 259 - 260 - static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr) 261 - { 262 - struct dentry *cpus_dir; 263 - struct dentry *lpar_dir; 264 - char lpar_name[DIAG204_LPAR_NAME_LEN + 1]; 265 - void *cpu_info; 266 - int i; 267 - 268 - 
part_hdr__part_name(diag204_info_type, part_hdr, lpar_name); 269 - lpar_name[DIAG204_LPAR_NAME_LEN] = 0; 270 - lpar_dir = hypfs_mkdir(systems_dir, lpar_name); 271 - if (IS_ERR(lpar_dir)) 272 - return lpar_dir; 273 - cpus_dir = hypfs_mkdir(lpar_dir, "cpus"); 274 - if (IS_ERR(cpus_dir)) 275 - return cpus_dir; 276 - cpu_info = part_hdr + part_hdr__size(diag204_info_type); 277 - for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) { 278 - int rc; 279 - rc = hypfs_create_cpu_files(cpus_dir, cpu_info); 280 - if (rc) 281 - return ERR_PTR(rc); 282 - cpu_info += cpu_info__size(diag204_info_type); 283 - } 284 - return cpu_info; 285 - } 286 - 287 - static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info) 288 - { 289 - struct dentry *cpu_dir; 290 - char buffer[TMP_SIZE]; 291 - void *rc; 292 - 293 - snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type, 294 - cpu_info)); 295 - cpu_dir = hypfs_mkdir(cpus_dir, buffer); 296 - if (IS_ERR(cpu_dir)) 297 - return PTR_ERR(cpu_dir); 298 - rc = hypfs_create_u64(cpu_dir, "mgmtime", 299 - phys_cpu__mgm_time(diag204_info_type, cpu_info)); 300 - if (IS_ERR(rc)) 301 - return PTR_ERR(rc); 302 - diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer); 303 - rc = hypfs_create_str(cpu_dir, "type", buffer); 304 - return PTR_ERR_OR_ZERO(rc); 305 - } 306 - 307 - static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr) 308 - { 309 - int i; 310 - void *cpu_info; 311 - struct dentry *cpus_dir; 312 - 313 - cpus_dir = hypfs_mkdir(parent_dir, "cpus"); 314 - if (IS_ERR(cpus_dir)) 315 - return cpus_dir; 316 - cpu_info = phys_hdr + phys_hdr__size(diag204_info_type); 317 - for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) { 318 - int rc; 319 - rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info); 320 - if (rc) 321 - return ERR_PTR(rc); 322 - cpu_info += phys_cpu__size(diag204_info_type); 323 - } 324 - return cpu_info; 325 - } 326 - 327 - int 
hypfs_diag_create_files(struct dentry *root) 328 - { 329 - struct dentry *systems_dir, *hyp_dir; 330 - void *time_hdr, *part_hdr; 331 - int i, rc; 332 - void *buffer, *ptr; 333 - 334 - buffer = diag204_store(); 335 - if (IS_ERR(buffer)) 336 - return PTR_ERR(buffer); 337 - 338 - systems_dir = hypfs_mkdir(root, "systems"); 339 - if (IS_ERR(systems_dir)) { 340 - rc = PTR_ERR(systems_dir); 341 - goto err_out; 342 - } 343 - time_hdr = (struct x_info_blk_hdr *)buffer; 344 - part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type); 345 - for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) { 346 - part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr); 347 - if (IS_ERR(part_hdr)) { 348 - rc = PTR_ERR(part_hdr); 349 - goto err_out; 350 - } 351 - } 352 - if (info_blk_hdr__flags(diag204_info_type, time_hdr) & 353 - DIAG204_LPAR_PHYS_FLG) { 354 - ptr = hypfs_create_phys_files(root, part_hdr); 355 - if (IS_ERR(ptr)) { 356 - rc = PTR_ERR(ptr); 357 - goto err_out; 358 - } 359 - } 360 - hyp_dir = hypfs_mkdir(root, "hyp"); 361 - if (IS_ERR(hyp_dir)) { 362 - rc = PTR_ERR(hyp_dir); 363 - goto err_out; 364 - } 365 - ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor"); 366 - if (IS_ERR(ptr)) { 367 - rc = PTR_ERR(ptr); 368 - goto err_out; 369 - } 370 - rc = 0; 371 - 372 - err_out: 373 - return rc; 374 424 }
+35
arch/s390/hypfs/hypfs_diag.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Hypervisor filesystem for Linux on s390. Diag 204 and 224 4 + * implementation. 5 + * 6 + * Copyright IBM Corp. 2006, 2008 7 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> 8 + */ 9 + 10 + #ifndef _S390_HYPFS_DIAG_H_ 11 + #define _S390_HYPFS_DIAG_H_ 12 + 13 + #include <asm/diag.h> 14 + 15 + enum diag204_format diag204_get_info_type(void); 16 + void *diag204_get_buffer(enum diag204_format fmt, int *pages); 17 + int diag204_store(void *buf, int pages); 18 + 19 + int __hypfs_diag_fs_init(void); 20 + void __hypfs_diag_fs_exit(void); 21 + 22 + static inline int hypfs_diag_fs_init(void) 23 + { 24 + if (IS_ENABLED(CONFIG_S390_HYPFS_FS)) 25 + return __hypfs_diag_fs_init(); 26 + return 0; 27 + } 28 + 29 + static inline void hypfs_diag_fs_exit(void) 30 + { 31 + if (IS_ENABLED(CONFIG_S390_HYPFS_FS)) 32 + __hypfs_diag_fs_exit(); 33 + } 34 + 35 + #endif /* _S390_HYPFS_DIAG_H_ */
+393
arch/s390/hypfs/hypfs_diag_fs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Hypervisor filesystem for Linux on s390. Diag 204 and 224 4 + * implementation. 5 + * 6 + * Copyright IBM Corp. 2006, 2008 7 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> 8 + */ 9 + 10 + #define KMSG_COMPONENT "hypfs" 11 + #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 + 13 + #include <linux/types.h> 14 + #include <linux/errno.h> 15 + #include <linux/slab.h> 16 + #include <linux/string.h> 17 + #include <linux/vmalloc.h> 18 + #include <linux/mm.h> 19 + #include <asm/diag.h> 20 + #include <asm/ebcdic.h> 21 + #include "hypfs_diag.h" 22 + #include "hypfs.h" 23 + 24 + #define TMP_SIZE 64 /* size of temporary buffers */ 25 + 26 + static char *diag224_cpu_names; /* diag 224 name table */ 27 + static int diag224_idx2name(int index, char *name); 28 + 29 + /* 30 + * DIAG 204 member access functions. 31 + * 32 + * Since we have two different diag 204 data formats for old and new s390 33 + * machines, we do not access the structs directly, but use getter functions for 34 + * each struct member instead. This should make the code more readable. 
35 + */ 36 + 37 + /* Time information block */ 38 + 39 + static inline int info_blk_hdr__size(enum diag204_format type) 40 + { 41 + if (type == DIAG204_INFO_SIMPLE) 42 + return sizeof(struct diag204_info_blk_hdr); 43 + else /* DIAG204_INFO_EXT */ 44 + return sizeof(struct diag204_x_info_blk_hdr); 45 + } 46 + 47 + static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr) 48 + { 49 + if (type == DIAG204_INFO_SIMPLE) 50 + return ((struct diag204_info_blk_hdr *)hdr)->npar; 51 + else /* DIAG204_INFO_EXT */ 52 + return ((struct diag204_x_info_blk_hdr *)hdr)->npar; 53 + } 54 + 55 + static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr) 56 + { 57 + if (type == DIAG204_INFO_SIMPLE) 58 + return ((struct diag204_info_blk_hdr *)hdr)->flags; 59 + else /* DIAG204_INFO_EXT */ 60 + return ((struct diag204_x_info_blk_hdr *)hdr)->flags; 61 + } 62 + 63 + /* Partition header */ 64 + 65 + static inline int part_hdr__size(enum diag204_format type) 66 + { 67 + if (type == DIAG204_INFO_SIMPLE) 68 + return sizeof(struct diag204_part_hdr); 69 + else /* DIAG204_INFO_EXT */ 70 + return sizeof(struct diag204_x_part_hdr); 71 + } 72 + 73 + static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr) 74 + { 75 + if (type == DIAG204_INFO_SIMPLE) 76 + return ((struct diag204_part_hdr *)hdr)->cpus; 77 + else /* DIAG204_INFO_EXT */ 78 + return ((struct diag204_x_part_hdr *)hdr)->rcpus; 79 + } 80 + 81 + static inline void part_hdr__part_name(enum diag204_format type, void *hdr, 82 + char *name) 83 + { 84 + if (type == DIAG204_INFO_SIMPLE) 85 + memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name, 86 + DIAG204_LPAR_NAME_LEN); 87 + else /* DIAG204_INFO_EXT */ 88 + memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name, 89 + DIAG204_LPAR_NAME_LEN); 90 + EBCASC(name, DIAG204_LPAR_NAME_LEN); 91 + name[DIAG204_LPAR_NAME_LEN] = 0; 92 + strim(name); 93 + } 94 + 95 + /* CPU info block */ 96 + 97 + static inline int cpu_info__size(enum diag204_format 
type) 98 + { 99 + if (type == DIAG204_INFO_SIMPLE) 100 + return sizeof(struct diag204_cpu_info); 101 + else /* DIAG204_INFO_EXT */ 102 + return sizeof(struct diag204_x_cpu_info); 103 + } 104 + 105 + static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr) 106 + { 107 + if (type == DIAG204_INFO_SIMPLE) 108 + return ((struct diag204_cpu_info *)hdr)->ctidx; 109 + else /* DIAG204_INFO_EXT */ 110 + return ((struct diag204_x_cpu_info *)hdr)->ctidx; 111 + } 112 + 113 + static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr) 114 + { 115 + if (type == DIAG204_INFO_SIMPLE) 116 + return ((struct diag204_cpu_info *)hdr)->cpu_addr; 117 + else /* DIAG204_INFO_EXT */ 118 + return ((struct diag204_x_cpu_info *)hdr)->cpu_addr; 119 + } 120 + 121 + static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr) 122 + { 123 + if (type == DIAG204_INFO_SIMPLE) 124 + return ((struct diag204_cpu_info *)hdr)->acc_time; 125 + else /* DIAG204_INFO_EXT */ 126 + return ((struct diag204_x_cpu_info *)hdr)->acc_time; 127 + } 128 + 129 + static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr) 130 + { 131 + if (type == DIAG204_INFO_SIMPLE) 132 + return ((struct diag204_cpu_info *)hdr)->lp_time; 133 + else /* DIAG204_INFO_EXT */ 134 + return ((struct diag204_x_cpu_info *)hdr)->lp_time; 135 + } 136 + 137 + static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr) 138 + { 139 + if (type == DIAG204_INFO_SIMPLE) 140 + return 0; /* online_time not available in simple info */ 141 + else /* DIAG204_INFO_EXT */ 142 + return ((struct diag204_x_cpu_info *)hdr)->online_time; 143 + } 144 + 145 + /* Physical header */ 146 + 147 + static inline int phys_hdr__size(enum diag204_format type) 148 + { 149 + if (type == DIAG204_INFO_SIMPLE) 150 + return sizeof(struct diag204_phys_hdr); 151 + else /* DIAG204_INFO_EXT */ 152 + return sizeof(struct diag204_x_phys_hdr); 153 + } 154 + 155 + static inline __u8 phys_hdr__cpus(enum 
diag204_format type, void *hdr) 156 + { 157 + if (type == DIAG204_INFO_SIMPLE) 158 + return ((struct diag204_phys_hdr *)hdr)->cpus; 159 + else /* DIAG204_INFO_EXT */ 160 + return ((struct diag204_x_phys_hdr *)hdr)->cpus; 161 + } 162 + 163 + /* Physical CPU info block */ 164 + 165 + static inline int phys_cpu__size(enum diag204_format type) 166 + { 167 + if (type == DIAG204_INFO_SIMPLE) 168 + return sizeof(struct diag204_phys_cpu); 169 + else /* DIAG204_INFO_EXT */ 170 + return sizeof(struct diag204_x_phys_cpu); 171 + } 172 + 173 + static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr) 174 + { 175 + if (type == DIAG204_INFO_SIMPLE) 176 + return ((struct diag204_phys_cpu *)hdr)->cpu_addr; 177 + else /* DIAG204_INFO_EXT */ 178 + return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr; 179 + } 180 + 181 + static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr) 182 + { 183 + if (type == DIAG204_INFO_SIMPLE) 184 + return ((struct diag204_phys_cpu *)hdr)->mgm_time; 185 + else /* DIAG204_INFO_EXT */ 186 + return ((struct diag204_x_phys_cpu *)hdr)->mgm_time; 187 + } 188 + 189 + static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr) 190 + { 191 + if (type == DIAG204_INFO_SIMPLE) 192 + return ((struct diag204_phys_cpu *)hdr)->ctidx; 193 + else /* DIAG204_INFO_EXT */ 194 + return ((struct diag204_x_phys_cpu *)hdr)->ctidx; 195 + } 196 + 197 + /* 198 + * Functions to create the directory structure 199 + * ******************************************* 200 + */ 201 + 202 + static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info) 203 + { 204 + struct dentry *cpu_dir; 205 + char buffer[TMP_SIZE]; 206 + void *rc; 207 + 208 + snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(), 209 + cpu_info)); 210 + cpu_dir = hypfs_mkdir(cpus_dir, buffer); 211 + rc = hypfs_create_u64(cpu_dir, "mgmtime", 212 + cpu_info__acc_time(diag204_get_info_type(), cpu_info) - 213 + 
cpu_info__lp_time(diag204_get_info_type(), cpu_info)); 214 + if (IS_ERR(rc)) 215 + return PTR_ERR(rc); 216 + rc = hypfs_create_u64(cpu_dir, "cputime", 217 + cpu_info__lp_time(diag204_get_info_type(), cpu_info)); 218 + if (IS_ERR(rc)) 219 + return PTR_ERR(rc); 220 + if (diag204_get_info_type() == DIAG204_INFO_EXT) { 221 + rc = hypfs_create_u64(cpu_dir, "onlinetime", 222 + cpu_info__online_time(diag204_get_info_type(), 223 + cpu_info)); 224 + if (IS_ERR(rc)) 225 + return PTR_ERR(rc); 226 + } 227 + diag224_idx2name(cpu_info__ctidx(diag204_get_info_type(), cpu_info), buffer); 228 + rc = hypfs_create_str(cpu_dir, "type", buffer); 229 + return PTR_ERR_OR_ZERO(rc); 230 + } 231 + 232 + static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr) 233 + { 234 + struct dentry *cpus_dir; 235 + struct dentry *lpar_dir; 236 + char lpar_name[DIAG204_LPAR_NAME_LEN + 1]; 237 + void *cpu_info; 238 + int i; 239 + 240 + part_hdr__part_name(diag204_get_info_type(), part_hdr, lpar_name); 241 + lpar_name[DIAG204_LPAR_NAME_LEN] = 0; 242 + lpar_dir = hypfs_mkdir(systems_dir, lpar_name); 243 + if (IS_ERR(lpar_dir)) 244 + return lpar_dir; 245 + cpus_dir = hypfs_mkdir(lpar_dir, "cpus"); 246 + if (IS_ERR(cpus_dir)) 247 + return cpus_dir; 248 + cpu_info = part_hdr + part_hdr__size(diag204_get_info_type()); 249 + for (i = 0; i < part_hdr__rcpus(diag204_get_info_type(), part_hdr); i++) { 250 + int rc; 251 + 252 + rc = hypfs_create_cpu_files(cpus_dir, cpu_info); 253 + if (rc) 254 + return ERR_PTR(rc); 255 + cpu_info += cpu_info__size(diag204_get_info_type()); 256 + } 257 + return cpu_info; 258 + } 259 + 260 + static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info) 261 + { 262 + struct dentry *cpu_dir; 263 + char buffer[TMP_SIZE]; 264 + void *rc; 265 + 266 + snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_get_info_type(), 267 + cpu_info)); 268 + cpu_dir = hypfs_mkdir(cpus_dir, buffer); 269 + if (IS_ERR(cpu_dir)) 270 + return PTR_ERR(cpu_dir); 
271 + rc = hypfs_create_u64(cpu_dir, "mgmtime", 272 + phys_cpu__mgm_time(diag204_get_info_type(), cpu_info)); 273 + if (IS_ERR(rc)) 274 + return PTR_ERR(rc); 275 + diag224_idx2name(phys_cpu__ctidx(diag204_get_info_type(), cpu_info), buffer); 276 + rc = hypfs_create_str(cpu_dir, "type", buffer); 277 + return PTR_ERR_OR_ZERO(rc); 278 + } 279 + 280 + static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr) 281 + { 282 + int i; 283 + void *cpu_info; 284 + struct dentry *cpus_dir; 285 + 286 + cpus_dir = hypfs_mkdir(parent_dir, "cpus"); 287 + if (IS_ERR(cpus_dir)) 288 + return cpus_dir; 289 + cpu_info = phys_hdr + phys_hdr__size(diag204_get_info_type()); 290 + for (i = 0; i < phys_hdr__cpus(diag204_get_info_type(), phys_hdr); i++) { 291 + int rc; 292 + 293 + rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info); 294 + if (rc) 295 + return ERR_PTR(rc); 296 + cpu_info += phys_cpu__size(diag204_get_info_type()); 297 + } 298 + return cpu_info; 299 + } 300 + 301 + int hypfs_diag_create_files(struct dentry *root) 302 + { 303 + struct dentry *systems_dir, *hyp_dir; 304 + void *time_hdr, *part_hdr; 305 + void *buffer, *ptr; 306 + int i, rc, pages; 307 + 308 + buffer = diag204_get_buffer(diag204_get_info_type(), &pages); 309 + if (IS_ERR(buffer)) 310 + return PTR_ERR(buffer); 311 + rc = diag204_store(buffer, pages); 312 + if (rc) 313 + return rc; 314 + 315 + systems_dir = hypfs_mkdir(root, "systems"); 316 + if (IS_ERR(systems_dir)) { 317 + rc = PTR_ERR(systems_dir); 318 + goto err_out; 319 + } 320 + time_hdr = (struct x_info_blk_hdr *)buffer; 321 + part_hdr = time_hdr + info_blk_hdr__size(diag204_get_info_type()); 322 + for (i = 0; i < info_blk_hdr__npar(diag204_get_info_type(), time_hdr); i++) { 323 + part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr); 324 + if (IS_ERR(part_hdr)) { 325 + rc = PTR_ERR(part_hdr); 326 + goto err_out; 327 + } 328 + } 329 + if (info_blk_hdr__flags(diag204_get_info_type(), time_hdr) & 330 + DIAG204_LPAR_PHYS_FLG) { 331 + 
ptr = hypfs_create_phys_files(root, part_hdr); 332 + if (IS_ERR(ptr)) { 333 + rc = PTR_ERR(ptr); 334 + goto err_out; 335 + } 336 + } 337 + hyp_dir = hypfs_mkdir(root, "hyp"); 338 + if (IS_ERR(hyp_dir)) { 339 + rc = PTR_ERR(hyp_dir); 340 + goto err_out; 341 + } 342 + ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor"); 343 + if (IS_ERR(ptr)) { 344 + rc = PTR_ERR(ptr); 345 + goto err_out; 346 + } 347 + rc = 0; 348 + 349 + err_out: 350 + return rc; 351 + } 352 + 353 + /* Diagnose 224 functions */ 354 + 355 + static int diag224_idx2name(int index, char *name) 356 + { 357 + memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN), 358 + DIAG204_CPU_NAME_LEN); 359 + name[DIAG204_CPU_NAME_LEN] = 0; 360 + strim(name); 361 + return 0; 362 + } 363 + 364 + static int diag224_get_name_table(void) 365 + { 366 + /* memory must be below 2GB */ 367 + diag224_cpu_names = (char *)__get_free_page(GFP_KERNEL | GFP_DMA); 368 + if (!diag224_cpu_names) 369 + return -ENOMEM; 370 + if (diag224(diag224_cpu_names)) { 371 + free_page((unsigned long)diag224_cpu_names); 372 + return -EOPNOTSUPP; 373 + } 374 + EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16); 375 + return 0; 376 + } 377 + 378 + static void diag224_delete_name_table(void) 379 + { 380 + free_page((unsigned long)diag224_cpu_names); 381 + } 382 + 383 + int __init __hypfs_diag_fs_init(void) 384 + { 385 + if (MACHINE_IS_LPAR) 386 + return diag224_get_name_table(); 387 + return 0; 388 + } 389 + 390 + void __hypfs_diag_fs_exit(void) 391 + { 392 + diag224_delete_name_table(); 393 + }
+11 -164
arch/s390/hypfs/hypfs_vm.c
··· 14 14 #include <asm/diag.h> 15 15 #include <asm/ebcdic.h> 16 16 #include <asm/timex.h> 17 + #include "hypfs_vm.h" 17 18 #include "hypfs.h" 18 19 19 - #define NAME_LEN 8 20 20 #define DBFS_D2FC_HDR_VERSION 0 21 21 22 22 static char local_guest[] = " "; 23 23 static char all_guests[] = "* "; 24 24 static char *all_groups = all_guests; 25 - static char *guest_query; 26 - 27 - struct diag2fc_data { 28 - __u32 version; 29 - __u32 flags; 30 - __u64 used_cpu; 31 - __u64 el_time; 32 - __u64 mem_min_kb; 33 - __u64 mem_max_kb; 34 - __u64 mem_share_kb; 35 - __u64 mem_used_kb; 36 - __u32 pcpus; 37 - __u32 lcpus; 38 - __u32 vcpus; 39 - __u32 ocpus; 40 - __u32 cpu_max; 41 - __u32 cpu_shares; 42 - __u32 cpu_use_samp; 43 - __u32 cpu_delay_samp; 44 - __u32 page_wait_samp; 45 - __u32 idle_samp; 46 - __u32 other_samp; 47 - __u32 total_samp; 48 - char guest_name[NAME_LEN]; 49 - }; 50 - 51 - struct diag2fc_parm_list { 52 - char userid[NAME_LEN]; 53 - char aci_grp[NAME_LEN]; 54 - __u64 addr; 55 - __u32 size; 56 - __u32 fmt; 57 - }; 25 + char *diag2fc_guest_query; 58 26 59 27 static int diag2fc(int size, char* query, void *addr) 60 28 { ··· 30 62 unsigned long rc; 31 63 struct diag2fc_parm_list parm_list; 32 64 33 - memcpy(parm_list.userid, query, NAME_LEN); 34 - ASCEBC(parm_list.userid, NAME_LEN); 35 - memcpy(parm_list.aci_grp, all_groups, NAME_LEN); 36 - ASCEBC(parm_list.aci_grp, NAME_LEN); 65 + memcpy(parm_list.userid, query, DIAG2FC_NAME_LEN); 66 + ASCEBC(parm_list.userid, DIAG2FC_NAME_LEN); 67 + memcpy(parm_list.aci_grp, all_groups, DIAG2FC_NAME_LEN); 68 + ASCEBC(parm_list.aci_grp, DIAG2FC_NAME_LEN); 37 69 parm_list.addr = (unsigned long)addr; 38 70 parm_list.size = size; 39 71 parm_list.fmt = 0x02; ··· 55 87 /* 56 88 * Allocate buffer for "query" and store diag 2fc at "offset" 57 89 */ 58 - static void *diag2fc_store(char *query, unsigned int *count, int offset) 90 + void *diag2fc_store(char *query, unsigned int *count, int offset) 59 91 { 60 92 void *data; 61 93 int size; ··· 
76 108 return data; 77 109 } 78 110 79 - static void diag2fc_free(const void *data) 111 + void diag2fc_free(const void *data) 80 112 { 81 113 vfree(data); 82 - } 83 - 84 - #define ATTRIBUTE(dir, name, member) \ 85 - do { \ 86 - void *rc; \ 87 - rc = hypfs_create_u64(dir, name, member); \ 88 - if (IS_ERR(rc)) \ 89 - return PTR_ERR(rc); \ 90 - } while(0) 91 - 92 - static int hypfs_vm_create_guest(struct dentry *systems_dir, 93 - struct diag2fc_data *data) 94 - { 95 - char guest_name[NAME_LEN + 1] = {}; 96 - struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; 97 - int dedicated_flag, capped_value; 98 - 99 - capped_value = (data->flags & 0x00000006) >> 1; 100 - dedicated_flag = (data->flags & 0x00000008) >> 3; 101 - 102 - /* guest dir */ 103 - memcpy(guest_name, data->guest_name, NAME_LEN); 104 - EBCASC(guest_name, NAME_LEN); 105 - strim(guest_name); 106 - guest_dir = hypfs_mkdir(systems_dir, guest_name); 107 - if (IS_ERR(guest_dir)) 108 - return PTR_ERR(guest_dir); 109 - ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time); 110 - 111 - /* logical cpu information */ 112 - cpus_dir = hypfs_mkdir(guest_dir, "cpus"); 113 - if (IS_ERR(cpus_dir)) 114 - return PTR_ERR(cpus_dir); 115 - ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu); 116 - ATTRIBUTE(cpus_dir, "capped", capped_value); 117 - ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag); 118 - ATTRIBUTE(cpus_dir, "count", data->vcpus); 119 - /* 120 - * Note: The "weight_min" attribute got the wrong name. 121 - * The value represents the number of non-stopped (operating) 122 - * CPUS. 
123 - */ 124 - ATTRIBUTE(cpus_dir, "weight_min", data->ocpus); 125 - ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max); 126 - ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares); 127 - 128 - /* memory information */ 129 - mem_dir = hypfs_mkdir(guest_dir, "mem"); 130 - if (IS_ERR(mem_dir)) 131 - return PTR_ERR(mem_dir); 132 - ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb); 133 - ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb); 134 - ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb); 135 - ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb); 136 - 137 - /* samples */ 138 - samples_dir = hypfs_mkdir(guest_dir, "samples"); 139 - if (IS_ERR(samples_dir)) 140 - return PTR_ERR(samples_dir); 141 - ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp); 142 - ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp); 143 - ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp); 144 - ATTRIBUTE(samples_dir, "idle", data->idle_samp); 145 - ATTRIBUTE(samples_dir, "other", data->other_samp); 146 - ATTRIBUTE(samples_dir, "total", data->total_samp); 147 - return 0; 148 - } 149 - 150 - int hypfs_vm_create_files(struct dentry *root) 151 - { 152 - struct dentry *dir, *file; 153 - struct diag2fc_data *data; 154 - unsigned int count = 0; 155 - int rc, i; 156 - 157 - data = diag2fc_store(guest_query, &count, 0); 158 - if (IS_ERR(data)) 159 - return PTR_ERR(data); 160 - 161 - /* Hypervisor Info */ 162 - dir = hypfs_mkdir(root, "hyp"); 163 - if (IS_ERR(dir)) { 164 - rc = PTR_ERR(dir); 165 - goto failed; 166 - } 167 - file = hypfs_create_str(dir, "type", "z/VM Hypervisor"); 168 - if (IS_ERR(file)) { 169 - rc = PTR_ERR(file); 170 - goto failed; 171 - } 172 - 173 - /* physical cpus */ 174 - dir = hypfs_mkdir(root, "cpus"); 175 - if (IS_ERR(dir)) { 176 - rc = PTR_ERR(dir); 177 - goto failed; 178 - } 179 - file = hypfs_create_u64(dir, "count", data->lcpus); 180 - if (IS_ERR(file)) { 181 - rc = PTR_ERR(file); 182 - goto failed; 183 - } 184 - 185 - /* guests */ 186 - dir = hypfs_mkdir(root, 
"systems"); 187 - if (IS_ERR(dir)) { 188 - rc = PTR_ERR(dir); 189 - goto failed; 190 - } 191 - 192 - for (i = 0; i < count; i++) { 193 - rc = hypfs_vm_create_guest(dir, &(data[i])); 194 - if (rc) 195 - goto failed; 196 - } 197 - diag2fc_free(data); 198 - return 0; 199 - 200 - failed: 201 - diag2fc_free(data); 202 - return rc; 203 114 } 204 115 205 116 struct dbfs_d2fc_hdr { ··· 99 252 struct dbfs_d2fc *d2fc; 100 253 unsigned int count; 101 254 102 - d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr)); 255 + d2fc = diag2fc_store(diag2fc_guest_query, &count, sizeof(d2fc->hdr)); 103 256 if (IS_ERR(d2fc)) 104 257 return PTR_ERR(d2fc); 105 258 store_tod_clock_ext(&d2fc->hdr.tod_ext); ··· 124 277 if (!MACHINE_IS_VM) 125 278 return 0; 126 279 if (diag2fc(0, all_guests, NULL) > 0) 127 - guest_query = all_guests; 280 + diag2fc_guest_query = all_guests; 128 281 else if (diag2fc(0, local_guest, NULL) > 0) 129 - guest_query = local_guest; 282 + diag2fc_guest_query = local_guest; 130 283 else 131 284 return -EACCES; 132 285 hypfs_dbfs_create_file(&dbfs_file_2fc);
+50
arch/s390/hypfs/hypfs_vm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Hypervisor filesystem for Linux on s390. z/VM implementation. 4 + * 5 + * Copyright IBM Corp. 2006 6 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 + */ 8 + 9 + #ifndef _S390_HYPFS_VM_H_ 10 + #define _S390_HYPFS_VM_H_ 11 + 12 + #define DIAG2FC_NAME_LEN 8 13 + 14 + struct diag2fc_data { 15 + __u32 version; 16 + __u32 flags; 17 + __u64 used_cpu; 18 + __u64 el_time; 19 + __u64 mem_min_kb; 20 + __u64 mem_max_kb; 21 + __u64 mem_share_kb; 22 + __u64 mem_used_kb; 23 + __u32 pcpus; 24 + __u32 lcpus; 25 + __u32 vcpus; 26 + __u32 ocpus; 27 + __u32 cpu_max; 28 + __u32 cpu_shares; 29 + __u32 cpu_use_samp; 30 + __u32 cpu_delay_samp; 31 + __u32 page_wait_samp; 32 + __u32 idle_samp; 33 + __u32 other_samp; 34 + __u32 total_samp; 35 + char guest_name[DIAG2FC_NAME_LEN]; 36 + }; 37 + 38 + struct diag2fc_parm_list { 39 + char userid[DIAG2FC_NAME_LEN]; 40 + char aci_grp[DIAG2FC_NAME_LEN]; 41 + __u64 addr; 42 + __u32 size; 43 + __u32 fmt; 44 + }; 45 + 46 + void *diag2fc_store(char *query, unsigned int *count, int offset); 47 + void diag2fc_free(const void *data); 48 + extern char *diag2fc_guest_query; 49 + 50 + #endif /* _S390_HYPFS_VM_H_ */
+139
arch/s390/hypfs/hypfs_vm_fs.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Hypervisor filesystem for Linux on s390. z/VM implementation. 4 + * 5 + * Copyright IBM Corp. 2006 6 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> 7 + */ 8 + 9 + #include <linux/types.h> 10 + #include <linux/errno.h> 11 + #include <linux/string.h> 12 + #include <linux/vmalloc.h> 13 + #include <asm/extable.h> 14 + #include <asm/diag.h> 15 + #include <asm/ebcdic.h> 16 + #include <asm/timex.h> 17 + #include "hypfs_vm.h" 18 + #include "hypfs.h" 19 + 20 + #define ATTRIBUTE(dir, name, member) \ 21 + do { \ 22 + void *rc; \ 23 + rc = hypfs_create_u64(dir, name, member); \ 24 + if (IS_ERR(rc)) \ 25 + return PTR_ERR(rc); \ 26 + } while (0) 27 + 28 + static int hypfs_vm_create_guest(struct dentry *systems_dir, 29 + struct diag2fc_data *data) 30 + { 31 + char guest_name[DIAG2FC_NAME_LEN + 1] = {}; 32 + struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; 33 + int dedicated_flag, capped_value; 34 + 35 + capped_value = (data->flags & 0x00000006) >> 1; 36 + dedicated_flag = (data->flags & 0x00000008) >> 3; 37 + 38 + /* guest dir */ 39 + memcpy(guest_name, data->guest_name, DIAG2FC_NAME_LEN); 40 + EBCASC(guest_name, DIAG2FC_NAME_LEN); 41 + strim(guest_name); 42 + guest_dir = hypfs_mkdir(systems_dir, guest_name); 43 + if (IS_ERR(guest_dir)) 44 + return PTR_ERR(guest_dir); 45 + ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time); 46 + 47 + /* logical cpu information */ 48 + cpus_dir = hypfs_mkdir(guest_dir, "cpus"); 49 + if (IS_ERR(cpus_dir)) 50 + return PTR_ERR(cpus_dir); 51 + ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu); 52 + ATTRIBUTE(cpus_dir, "capped", capped_value); 53 + ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag); 54 + ATTRIBUTE(cpus_dir, "count", data->vcpus); 55 + /* 56 + * Note: The "weight_min" attribute got the wrong name. 57 + * The value represents the number of non-stopped (operating) 58 + * CPUS. 
59 + */ 60 + ATTRIBUTE(cpus_dir, "weight_min", data->ocpus); 61 + ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max); 62 + ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares); 63 + 64 + /* memory information */ 65 + mem_dir = hypfs_mkdir(guest_dir, "mem"); 66 + if (IS_ERR(mem_dir)) 67 + return PTR_ERR(mem_dir); 68 + ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb); 69 + ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb); 70 + ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb); 71 + ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb); 72 + 73 + /* samples */ 74 + samples_dir = hypfs_mkdir(guest_dir, "samples"); 75 + if (IS_ERR(samples_dir)) 76 + return PTR_ERR(samples_dir); 77 + ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp); 78 + ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp); 79 + ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp); 80 + ATTRIBUTE(samples_dir, "idle", data->idle_samp); 81 + ATTRIBUTE(samples_dir, "other", data->other_samp); 82 + ATTRIBUTE(samples_dir, "total", data->total_samp); 83 + return 0; 84 + } 85 + 86 + int hypfs_vm_create_files(struct dentry *root) 87 + { 88 + struct dentry *dir, *file; 89 + struct diag2fc_data *data; 90 + unsigned int count = 0; 91 + int rc, i; 92 + 93 + data = diag2fc_store(diag2fc_guest_query, &count, 0); 94 + if (IS_ERR(data)) 95 + return PTR_ERR(data); 96 + 97 + /* Hypervisor Info */ 98 + dir = hypfs_mkdir(root, "hyp"); 99 + if (IS_ERR(dir)) { 100 + rc = PTR_ERR(dir); 101 + goto failed; 102 + } 103 + file = hypfs_create_str(dir, "type", "z/VM Hypervisor"); 104 + if (IS_ERR(file)) { 105 + rc = PTR_ERR(file); 106 + goto failed; 107 + } 108 + 109 + /* physical cpus */ 110 + dir = hypfs_mkdir(root, "cpus"); 111 + if (IS_ERR(dir)) { 112 + rc = PTR_ERR(dir); 113 + goto failed; 114 + } 115 + file = hypfs_create_u64(dir, "count", data->lcpus); 116 + if (IS_ERR(file)) { 117 + rc = PTR_ERR(file); 118 + goto failed; 119 + } 120 + 121 + /* guests */ 122 + dir = hypfs_mkdir(root, "systems"); 123 + if 
(IS_ERR(dir)) { 124 + rc = PTR_ERR(dir); 125 + goto failed; 126 + } 127 + 128 + for (i = 0; i < count; i++) { 129 + rc = hypfs_vm_create_guest(dir, &data[i]); 130 + if (rc) 131 + goto failed; 132 + } 133 + diag2fc_free(data); 134 + return 0; 135 + 136 + failed: 137 + diag2fc_free(data); 138 + return rc; 139 + }
+4 -31
arch/s390/hypfs/inode.c
··· 460 460 .show_options = hypfs_show_options, 461 461 }; 462 462 463 - static int __init hypfs_init(void) 463 + int __init __hypfs_fs_init(void) 464 464 { 465 465 int rc; 466 466 467 - hypfs_dbfs_init(); 468 - 469 - if (hypfs_diag_init()) { 470 - rc = -ENODATA; 471 - goto fail_dbfs_exit; 472 - } 473 - if (hypfs_vm_init()) { 474 - rc = -ENODATA; 475 - goto fail_hypfs_diag_exit; 476 - } 477 - hypfs_sprp_init(); 478 - if (hypfs_diag0c_init()) { 479 - rc = -ENODATA; 480 - goto fail_hypfs_sprp_exit; 481 - } 482 467 rc = sysfs_create_mount_point(hypervisor_kobj, "s390"); 483 468 if (rc) 484 - goto fail_hypfs_diag0c_exit; 469 + return rc; 485 470 rc = register_filesystem(&hypfs_type); 486 471 if (rc) 487 - goto fail_filesystem; 472 + goto fail; 488 473 return 0; 489 - 490 - fail_filesystem: 474 + fail: 491 475 sysfs_remove_mount_point(hypervisor_kobj, "s390"); 492 - fail_hypfs_diag0c_exit: 493 - hypfs_diag0c_exit(); 494 - fail_hypfs_sprp_exit: 495 - hypfs_sprp_exit(); 496 - hypfs_vm_exit(); 497 - fail_hypfs_diag_exit: 498 - hypfs_diag_exit(); 499 - pr_err("Initialization of hypfs failed with rc=%i\n", rc); 500 - fail_dbfs_exit: 501 - hypfs_dbfs_exit(); 502 476 return rc; 503 477 } 504 - device_initcall(hypfs_init)